Hypre::State Hypre::state = Hypre::State::UNINITIALIZED;

   if (state != State::INITIALIZED)
#if MFEM_HYPRE_VERSION >= 21900
   state = State::INITIALIZED;

#if defined(HYPRE_USING_GPU) && (MFEM_HYPRE_VERSION >= 23100)
   MFEM_VERIFY(HYPRE_Initialized(), "HYPRE must be initialized before"
               " calling Hypre::InitDevice()");
   HYPRE_SetMemoryLocation(HYPRE_MEMORY_DEVICE);
   HYPRE_SetExecutionPolicy(HYPRE_EXEC_DEVICE);
   HYPRE_DeviceInitialize();
   HYPRE_SetMemoryLocation(HYPRE_MEMORY_HOST);
   HYPRE_SetExecutionPolicy(HYPRE_EXEC_HOST);
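// Illustrative usage sketch (an assumption about a typical driver program, not
// part of this file): initialize the MFEM device and the HYPRE wrapper once at
// startup, before any HypreParVector/HypreParMatrix is created.
//
//   #include "mfem.hpp"
//   int main(int argc, char *argv[])
//   {
//      mfem::Mpi::Init(argc, argv);
//      mfem::Device device("cuda");   // or "cpu", "hip", ...
//      mfem::Hypre::Init();           // wraps HYPRE_Initialize() when available
//      mfem::Hypre::InitDevice();     // select device memory/execution policy
//      // ... assemble and solve ...
//      mfem::Hypre::Finalize();       // release HYPRE before MPI shuts down
//      return 0;
//   }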
   if (state != State::UNINITIALIZED)
#if MFEM_HYPRE_VERSION >= 21900
   state = State::UNINITIALIZED;
void Hypre::SetDefaultOptions()
#if MFEM_HYPRE_VERSION >= 22100
#ifdef HYPRE_USING_CUDA
   HYPRE_SetSpGemmUseCusparse(0);
#elif defined(HYPRE_USING_HIP)
   HYPRE_SetSpMVUseVendor(0);
template <typename TargetT, typename SourceT>
static TargetT *DuplicateAs(const SourceT *array, int size,
                            bool cplusplus = true)
   TargetT *target_array = cplusplus ? (TargetT*) Memory<TargetT>(size)
                                     : mfem_hypre_TAlloc_host(TargetT, size);
   for (int i = 0; i < size; i++)
      target_array[i] = array[i];
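// Illustrative sketch (hypothetical call site, not from this file): make a
// HYPRE_BigInt copy of an int partitioning array; with cplusplus == true the
// copy is allocated through MFEM's Memory<T>, otherwise through hypre's host
// allocator, so the caller must free it with the matching mechanism.
//
//   const int *starts_int = ...;   // e.g. an MFEM partitioning array
//   HYPRE_BigInt *starts =
//      DuplicateAs<HYPRE_BigInt>(starts_int, part_size, /*cplusplus=*/false);
//   // ... pass 'starts' to hypre; later: mfem_hypre_TFree_host(starts);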
inline void HypreParVector::_SetDataAndSize_()
   hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
#if !defined(HYPRE_USING_GPU)
                  internal::to_int(hypre_VectorSize(x_loc)));
   size = internal::to_int(hypre_VectorSize(x_loc));
   MemoryType mt = (hypre_VectorMemoryLocation(x_loc) == HYPRE_MEMORY_HOST
   if (hypre_VectorData(x_loc) != NULL)
   x = hypre_ParVectorCreate(comm,glob_size,col);
   hypre_ParVectorInitialize(x);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParVectorSetPartitioningOwner(x,0);
   hypre_ParVectorSetDataOwner(x,1);
   hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(x),1);
   x = hypre_ParVectorCreate(comm,glob_size,col);
   hypre_ParVectorSetDataOwner(x,1);
   hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
   hypre_SeqVectorSetDataOwner(x_loc,0);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParVectorSetPartitioningOwner(x,0);
   hypre_VectorData(x_loc) = &tmp;
#ifdef HYPRE_USING_GPU
   hypre_VectorMemoryLocation(x_loc) =
      is_device_ptr ? HYPRE_MEMORY_DEVICE : HYPRE_MEMORY_HOST;
   MFEM_CONTRACT_VAR(is_device_ptr);
   hypre_ParVectorInitialize(x);
   hypre_VectorData(x_loc) = data_;
               "the MemoryTypes of 'base' are incompatible with Hypre!");
   MFEM_ASSERT(offset + size <= base.Size(),
               "the size of 'base' is too small!");
   hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
#ifdef HYPRE_USING_GPU
     y.CreateCompatibleVector())
   hypre_SeqVectorCopy(hypre_ParVectorLocalVector(y.x),
                       hypre_ParVectorLocalVector(x));
   *this = std::move(y);
   x = (hypre_ParVector *) y;
   hypre_ParVectorInitialize(x);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParVectorSetPartitioningOwner(x,0);
   hypre_ParVectorSetDataOwner(x,1);
   hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(x),1);
   result.x = hypre_ParVectorCreate(x -> comm, x -> global_size,
   hypre_ParVectorInitialize(result.x);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParVectorSetPartitioningOwner(result.x,0);
   hypre_ParVectorSetDataOwner(result.x,1);
   hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(result.x),1);
   result._SetDataAndSize_();
   result.own_ParVector = 1;
   if (own_ParVector) { hypre_ParVectorDestroy(x); }
   own_ParVector = owner;
   hypre_Vector *hv = hypre_ParVectorToVectorAll(*this);
   Vector *v = new Vector(hv->data, internal::to_int(hv->size));
   hypre_SeqVectorSetDataOwner(hv,0);
   hypre_SeqVectorDestroy(hv);
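// Added explanatory comment: hv gives up ownership of its data array before
// hypre_SeqVectorDestroy(), so the gathered global data survives and remains
// accessible through the returned Vector *v; deleting v is the caller's job.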
   const auto own_tmp = y.own_ParVector;
   own_ParVector = own_tmp;
   const auto x_tmp = y.x;
   hypre_VectorData(hypre_ParVectorLocalVector(x)) = data_;
   hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
   hypre_VectorData(x_loc) =
#ifdef HYPRE_USING_GPU
   hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
#ifdef HYPRE_USING_GPU
   hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
#ifdef HYPRE_USING_GPU
   hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
   hypre_VectorData(x_loc) =
#ifdef HYPRE_USING_GPU
   hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
#ifdef HYPRE_USING_GPU
   hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
#ifdef HYPRE_USING_GPU
   return hypre_ParVectorSetRandomValues(x,seed);
   hypre_ParVectorPrint(x, fname.c_str());
   hypre_ParVectorDestroy(x);
   x = hypre_ParVectorRead(comm, fname.c_str());
   own_ParVector = true;
   hypre_ParVectorDestroy(x);
   return hypre_ParVectorInnerProd(*x, *y);
   return hypre_ParVectorInnerProd(x, y);
   real_t loc_norm = vec*vec;
   for (int i = 0; i < vec.Size(); i++)
      sum += pow(fabs(vec(i)), p);
   norm = pow(norm, 1.0/p);
template <typename SrcT, typename DstT>
   mfem::forall(capacity, [=] MFEM_HOST_DEVICE (int i) { dst_p[i] = src_p[i]; });
void HypreParMatrix::Init()
   diagOwner = offdOwner = colMapOwner = -1;
#if MFEM_HYPRE_VERSION >= 21800
inline decltype(hypre_CSRMatrix::memory_location)
   decltype(hypre_CSRMatrix::memory_location) ml;
#if !defined(HYPRE_USING_GPU)
   ml = HYPRE_MEMORY_HOST;
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   const int num_rows = NumRows();
   const int diag_nnz = internal::to_int(diag->num_nonzeros);
   const int offd_nnz = internal::to_int(offd->num_nonzeros);
   diag->i = const_cast<HYPRE_Int*>(mem_diag.I.Read(mc, num_rows+1));
   diag->j = const_cast<HYPRE_Int*>(mem_diag.J.Read(mc, diag_nnz));
   diag->data = const_cast<real_t*>(mem_diag.data.Read(mc, diag_nnz));
   offd->i = const_cast<HYPRE_Int*>(mem_offd.I.Read(mc, num_rows+1));
   offd->j = const_cast<HYPRE_Int*>(mem_offd.J.Read(mc, offd_nnz));
   offd->data = const_cast<real_t*>(mem_offd.data.Read(mc, offd_nnz));
#if MFEM_HYPRE_VERSION >= 21800
   diag->memory_location = ml;
   offd->memory_location = ml;
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   const int num_rows = NumRows();
   const int diag_nnz = internal::to_int(diag->num_nonzeros);
   const int offd_nnz = internal::to_int(offd->num_nonzeros);
   diag->i = mem_diag.I.ReadWrite(mc, num_rows+1);
   offd->i = mem_offd.I.ReadWrite(mc, num_rows+1);
#if MFEM_HYPRE_VERSION >= 21800
   diag->memory_location = ml;
   offd->memory_location = ml;
void HypreParMatrix::Write(MemoryClass mc, bool set_diag, bool set_offd)
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
#if MFEM_HYPRE_VERSION >= 21800
   if (set_diag) { diag->memory_location = ml; }
   if (set_offd) { offd->memory_location = ml; }
#if MFEM_HYPRE_VERSION >= 21800
   MemoryType diag_mt = (A->diag->memory_location == HYPRE_MEMORY_HOST
   MemoryType offd_mt = (A->offd->memory_location == HYPRE_MEMORY_HOST
   diagOwner = HypreCsrToMem(A->diag, diag_mt, false, mem_diag);
   offdOwner = HypreCsrToMem(A->offd, offd_mt, false, mem_offd);
                                    hypre_CSRMatrix *hypre_csr,
   const int num_rows = csr->Height();
   hypre_csr->i = const_cast<HYPRE_Int*>(mem_csr.I.Read(hypre_mc, num_rows+1));
   hypre_csr->j = const_cast<HYPRE_Int*>(mem_csr.J.Read(hypre_mc, nnz));
   hypre_csr->data = const_cast<real_t*>(mem_csr.data.Read(hypre_mc, nnz));
               "invalid state: host ownership for I and J differ!");
   return (mem_csr.I.OwnsHostPtr() ? 1 : 0) +
          (mem_csr.data.OwnsHostPtr() ? 2 : 0);
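// Added explanatory comment: the returned "owner" flag packs two booleans,
// bit 0 = the I/J arrays are owned on the host and bit 1 = the data array is
// owned on the host, so values 0..3 cover all combinations (negative values,
// used elsewhere in this file, mean the arrays are owned by hypre itself).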
signed char HypreParMatrix::CopyBoolCSR(Table *bool_csr,
                                        MemoryIJData &mem_csr,
                                        hypre_CSRMatrix *hypre_csr)
   CopyMemory(bool_csr->GetIMemory(), mem_csr.I, hypre_mc, false);
   CopyMemory(bool_csr->GetJMemory(), mem_csr.J, hypre_mc, false);
   const int num_rows = bool_csr->Size();
   const int nnz = bool_csr->Size_of_connections();
   for (int i = 0; i < nnz; i++)
   hypre_csr->i = const_cast<HYPRE_Int*>(mem_csr.I.Read(hypre_mc, num_rows+1));
   hypre_csr->j = const_cast<HYPRE_Int*>(mem_csr.J.Read(hypre_mc, nnz));
   hypre_csr->data = const_cast<real_t*>(mem_csr.data.Read(hypre_mc, nnz));
   MFEM_ASSERT(mem_csr.I.OwnsHostPtr() == mem_csr.J.OwnsHostPtr(),
               "invalid state: host ownership for I and J differ!");
   return (mem_csr.I.OwnsHostPtr() ? 1 : 0) +
          (mem_csr.data.OwnsHostPtr() ? 2 : 0);
static void CopyCSR_J(const int nnz, const MemoryIJData &mem_csr,
   mfem::forall(nnz, [=] MFEM_HOST_DEVICE (int i) { dst_p[i] = src_p[i]; });
static void SyncBackCSR(SparseMatrix *csr, MemoryIJData &mem_csr)
   const bool data_shallow = CanShallowCopy(csr->GetMemoryData(), hypre_mc);
#if !defined(HYPRE_BIGINT) && defined(MFEM_DEBUG)
   const bool J_shallow = CanShallowCopy(csr->GetMemoryJ(), hypre_mc);
   MFEM_ASSERT(J_shallow == data_shallow, "unsupported state");
   csr->GetMemoryJ().Sync(mem_csr.J);
   CopyCSR_J(csr->GetMemoryJ().Capacity(), mem_csr, csr->GetMemoryJ());
   csr->GetMemoryData().Sync(mem_csr.data);
static void SyncBackBoolCSR(Table *bool_csr, MemoryIJData &mem_csr)
   const bool J_shallow = CanShallowCopy(bool_csr->GetJMemory(), hypre_mc);
   bool_csr->GetJMemory().Sync(mem_csr.J);
static int GetPartitioningArraySize(MPI_Comm comm)
   if (HYPRE_AssumedPartitionCheck())
   MPI_Comm_size(comm, &comm_size);
   return comm_size + 1;
static bool RowAndColStartsAreEqual(MPI_Comm comm, HYPRE_BigInt *rows,
   const int part_size = GetPartitioningArraySize(comm);
   bool are_equal = true;
   for (int i = 0; i < part_size; ++i)
      if (rows[i] != cols[i])
   MPI_Allreduce(MPI_IN_PLACE, &are_equal, 1, MFEM_MPI_CXX_BOOL, MPI_LAND, comm);
signed char HypreParMatrix::HypreCsrToMem(hypre_CSRMatrix *h_mat,
   const int nr1 = internal::to_int(h_mat->num_rows) + 1;
   const int nnz = internal::to_int(h_mat->num_nonzeros);
   mem.I.Wrap(h_mat->i, nr1, h_mat_mt, own_ija);
   mem.J.Wrap(h_mat->j, nnz, h_mat_mt, own_ija);
   mem.data.Wrap(h_mat->data, nnz, h_mat_mt, own_ija);
   h_mem.I.New(nr1, hypre_mt);
   h_mem.I.CopyFrom(mem.I, nr1);
   h_mem.J.New(nnz, hypre_mt);
   h_mem.J.CopyFrom(mem.J, nnz);
   h_mem.data.New(nnz, hypre_mt);
   h_mem.data.CopyFrom(mem.data, nnz);
#if MFEM_HYPRE_VERSION < 21400
   hypre_TFree(h_mat->i);
#elif MFEM_HYPRE_VERSION < 21800
   hypre_TFree(h_mat->i, HYPRE_MEMORY_SHARED);
   hypre_TFree(h_mat->i, h_mat->memory_location);
   if (h_mat->owns_data)
#if MFEM_HYPRE_VERSION < 21400
      hypre_TFree(h_mat->j);
      hypre_TFree(h_mat->data);
#elif MFEM_HYPRE_VERSION < 21800
      hypre_TFree(h_mat->j, HYPRE_MEMORY_SHARED);
      hypre_TFree(h_mat->data, HYPRE_MEMORY_SHARED);
      hypre_TFree(h_mat->j, h_mat->memory_location);
      hypre_TFree(h_mat->data, h_mat->memory_location);
   h_mat->i = mem.I.ReadWrite(hypre_mc, nr1);
   h_mat->j = mem.J.ReadWrite(hypre_mc, nnz);
   h_mat->data = mem.data.ReadWrite(hypre_mc, nnz);
   h_mat->owns_data = 0;
#if MFEM_HYPRE_VERSION >= 21800
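// Added explanatory comment: HypreCsrToMem either wraps the existing hypre
// arrays in MFEM Memory objects (shallow, optionally taking ownership via
// own_ija) or, when the requested MemoryType differs, deep-copies them into
// new Memory allocations, frees the old hypre arrays with the
// version-appropriate hypre_TFree overload, and repoints the hypre_CSRMatrix
// at the MFEM-managed arrays (owns_data = 0).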
   : Operator(diag->Height(), diag->Width())
   A = hypre_ParCSRMatrixCreate(comm, glob_size, glob_size, row_starts,
   hypre_ParCSRMatrixSetDataOwner(A,1);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixSetRowStartsOwner(A,0);
   hypre_ParCSRMatrixSetColStartsOwner(A,0);
   hypre_CSRMatrixSetDataOwner(A->diag,0);
   diagOwner = CopyCSR(diag, mem_diag, A->diag, false);
   hypre_CSRMatrixSetRownnz(A->diag);
   hypre_CSRMatrixSetDataOwner(A->offd,1);
   hypre_CSRMatrixI(A->offd) = mfem_hypre_CTAlloc(HYPRE_Int, diag->Height()+1);
   hypre_ParCSRMatrixSetNumNonzeros(A);
   hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   SyncBackCSR(diag, mem_diag);
   hypre_MatvecCommPkgCreate(A);
   : Operator(diag->Height(), diag->Width())
   A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
                                row_starts, col_starts,
   hypre_ParCSRMatrixSetDataOwner(A,1);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixSetRowStartsOwner(A,0);
   hypre_ParCSRMatrixSetColStartsOwner(A,0);
   hypre_CSRMatrixSetDataOwner(A->diag,0);
   diagOwner = CopyCSR(diag, mem_diag, A->diag, false);
   hypre_CSRMatrixSetRownnz(A->diag);
   hypre_CSRMatrixSetDataOwner(A->offd,1);
   hypre_CSRMatrixI(A->offd) = mfem_hypre_CTAlloc(HYPRE_Int, diag->Height()+1);
   hypre_ParCSRMatrixSetNumNonzeros(A);
   if (RowAndColStartsAreEqual(comm, row_starts, col_starts))
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   SyncBackCSR(diag, mem_diag);
   hypre_MatvecCommPkgCreate(A);
   : Operator(diag->Height(), diag->Width())
   A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
                                row_starts, col_starts,
   hypre_ParCSRMatrixSetDataOwner(A,1);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixSetRowStartsOwner(A,0);
   hypre_ParCSRMatrixSetColStartsOwner(A,0);
   hypre_CSRMatrixSetDataOwner(A->diag,0);
   diagOwner = CopyCSR(diag, mem_diag, A->diag, own_diag_offd);
   if (own_diag_offd) { delete diag; }
   hypre_CSRMatrixSetRownnz(A->diag);
   hypre_CSRMatrixSetDataOwner(A->offd,0);
   offdOwner = CopyCSR(offd, mem_offd, A->offd, own_diag_offd);
   if (own_diag_offd) { delete offd; }
   hypre_CSRMatrixSetRownnz(A->offd);
   hypre_ParCSRMatrixColMapOffd(A) = cmap;
   hypre_ParCSRMatrixSetNumNonzeros(A);
   if (RowAndColStartsAreEqual(comm, row_starts, col_starts))
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   if (!own_diag_offd) { SyncBackCSR(diag, mem_diag); }
   hypre_MatvecCommPkgCreate(A);
                             HYPRE_Int *diag_i, HYPRE_Int *diag_j, real_t *diag_data,
                             HYPRE_Int *offd_i, HYPRE_Int *offd_j, real_t *offd_data,
   A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
                                row_starts, col_starts, offd_num_cols, 0, 0);
   hypre_ParCSRMatrixSetDataOwner(A,1);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixSetRowStartsOwner(A,0);
   hypre_ParCSRMatrixSetColStartsOwner(A,0);
   HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A->diag);
   hypre_CSRMatrixSetDataOwner(A->diag, hypre_arrays);
   hypre_CSRMatrixI(A->diag) = diag_i;
   hypre_CSRMatrixJ(A->diag) = diag_j;
   hypre_CSRMatrixData(A->diag) = diag_data;
   hypre_CSRMatrixNumNonzeros(A->diag) = diag_i[local_num_rows];
#ifdef HYPRE_USING_GPU
   hypre_CSRMatrixMemoryLocation(A->diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixSetDataOwner(A->offd, hypre_arrays);
   hypre_CSRMatrixI(A->offd) = offd_i;
   hypre_CSRMatrixJ(A->offd) = offd_j;
   hypre_CSRMatrixData(A->offd) = offd_data;
   hypre_CSRMatrixNumNonzeros(A->offd) = offd_i[local_num_rows];
#ifdef HYPRE_USING_GPU
   hypre_CSRMatrixMemoryLocation(A->offd) = HYPRE_MEMORY_HOST;
   hypre_ParCSRMatrixColMapOffd(A) = offd_col_map;
   colMapOwner = hypre_arrays ? -1 : 1;
   hypre_ParCSRMatrixSetNumNonzeros(A);
   if (RowAndColStartsAreEqual(comm, row_starts, col_starts))
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   hypre_MatvecCommPkgCreate(A);
   diagOwner = HypreCsrToMem(A->diag, host_mt, true, mem_diag);
   offdOwner = HypreCsrToMem(A->offd, host_mt, true, mem_offd);
   diagOwner = HypreCsrToMem(A->diag, host_mt, false, mem_diag);
   offdOwner = HypreCsrToMem(A->offd, host_mt, false, mem_offd);
   hypre_CSRMatrixSetRownnz(A->diag);
   hypre_CSRMatrixSetRownnz(A->offd);
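// Added explanatory comment: the hypre_arrays flag records whether the raw
// I/J/data pointers passed in came from hypre allocations; it decides both
// who owns offd_col_map (colMapOwner = -1 vs. 1) and whether HypreCsrToMem
// wraps the CSR arrays owning or non-owning.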
   MFEM_ASSERT(sm_a != NULL, "invalid input");
   MFEM_VERIFY(!HYPRE_AssumedPartitionCheck(),
               "this method can not be used with assumed partition");
   hypre_CSRMatrix *csr_a;
   csr_a = hypre_CSRMatrixCreate(sm_a -> Height(), sm_a -> Width(),
                                 sm_a -> NumNonZeroElems());
   hypre_CSRMatrixSetDataOwner(csr_a,0);
   CopyCSR(sm_a, mem_a, csr_a, false);
   hypre_CSRMatrixSetRownnz(csr_a);
   hypre_ParCSRMatrix *new_A =
      hypre_CSRMatrixToParCSRMatrix(comm, csr_a, row_starts, col_starts);
   hypre_CSRMatrixI(csr_a) = NULL;
   hypre_CSRMatrixDestroy(csr_a);
   if (RowAndColStartsAreEqual(comm, row_starts, col_starts))
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(new_A));
   hypre_MatvecCommPkgCreate(A);
   A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
                                row_starts, col_starts, 0, nnz, 0);
   hypre_ParCSRMatrixSetDataOwner(A,1);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixSetRowStartsOwner(A,0);
   hypre_ParCSRMatrixSetColStartsOwner(A,0);
   hypre_CSRMatrixSetDataOwner(A->diag,0);
   diagOwner = CopyBoolCSR(diag, mem_diag, A->diag);
   hypre_CSRMatrixSetRownnz(A->diag);
   hypre_CSRMatrixSetDataOwner(A->offd,1);
   hypre_CSRMatrixI(A->offd) = mfem_hypre_CTAlloc(HYPRE_Int, diag->Size()+1);
   hypre_ParCSRMatrixSetNumNonzeros(A);
   if (RowAndColStartsAreEqual(comm, row_starts, col_starts))
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   SyncBackBoolCSR(diag, mem_diag);
   hypre_MatvecCommPkgCreate(A);
                             HYPRE_Int *i_diag, HYPRE_Int *j_diag,
                             HYPRE_Int *i_offd, HYPRE_Int *j_offd,
   HYPRE_Int diag_nnz, offd_nnz;
   if (HYPRE_AssumedPartitionCheck())
      diag_nnz = i_diag[row[1]-row[0]];
      offd_nnz = i_offd[row[1]-row[0]];
      A = hypre_ParCSRMatrixCreate(comm, row[2], col[2], row, col,
                                   cmap_size, diag_nnz, offd_nnz);
      diag_nnz = i_diag[row[id+1]-row[id]];
      offd_nnz = i_offd[row[id+1]-row[id]];
      A = hypre_ParCSRMatrixCreate(comm, row[np], col[np], row, col,
                                   cmap_size, diag_nnz, offd_nnz);
   hypre_ParCSRMatrixSetDataOwner(A,1);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixSetRowStartsOwner(A,0);
   hypre_ParCSRMatrixSetColStartsOwner(A,0);
   for (HYPRE_Int i = 0; i < diag_nnz; i++)
   for (HYPRE_Int i = 0; i < offd_nnz; i++)
      mem_offd.data[i] = 1.0;
   hypre_CSRMatrixSetDataOwner(A->diag,0);
   hypre_CSRMatrixI(A->diag) = i_diag;
   hypre_CSRMatrixJ(A->diag) = j_diag;
   hypre_CSRMatrixData(A->diag) = mem_diag.data;
#ifdef HYPRE_USING_GPU
   hypre_CSRMatrixMemoryLocation(A->diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixSetDataOwner(A->offd,0);
   hypre_CSRMatrixI(A->offd) = i_offd;
   hypre_CSRMatrixJ(A->offd) = j_offd;
   hypre_CSRMatrixData(A->offd) = mem_offd.data;
#ifdef HYPRE_USING_GPU
   hypre_CSRMatrixMemoryLocation(A->offd) = HYPRE_MEMORY_HOST;
   hypre_ParCSRMatrixColMapOffd(A) = cmap;
   hypre_ParCSRMatrixSetNumNonzeros(A);
   hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   hypre_MatvecCommPkgCreate(A);
   diagOwner = HypreCsrToMem(A->diag, host_mt, true, mem_diag);
   offdOwner = HypreCsrToMem(A->offd, host_mt, true, mem_offd);
   hypre_CSRMatrixSetRownnz(A->diag);
   hypre_CSRMatrixSetRownnz(A->offd);
   const int part_size = GetPartitioningArraySize(comm);
   if (HYPRE_AssumedPartitionCheck())
      my_col_start = cols[0];
      my_col_end = cols[1];
      MPI_Comm_rank(comm, &myid);
      my_col_start = cols[myid];
      my_col_end = cols[myid+1];
   const bool rows_eq_cols = RowAndColStartsAreEqual(comm, rows, cols);
      row_starts = col_starts = mfem_hypre_TAlloc_host(HYPRE_BigInt, part_size);
      for (int i = 0; i < part_size; i++)
         row_starts[i] = rows[i];
      row_starts = mfem_hypre_TAlloc_host(HYPRE_BigInt, part_size);
      col_starts = mfem_hypre_TAlloc_host(HYPRE_BigInt, part_size);
      for (int i = 0; i < part_size; i++)
         row_starts[i] = rows[i];
         col_starts[i] = cols[i];
   HYPRE_Int diag_nnz = 0, offd_nnz = 0, offd_num_cols = 0;
   map<HYPRE_BigInt, HYPRE_Int> offd_map;
   for (HYPRE_Int j = 0, loc_nnz = I[nrows]; j < loc_nnz; j++)
      if (my_col_start <= glob_col && glob_col < my_col_end)
         offd_map.insert(pair<const HYPRE_BigInt, HYPRE_Int>(glob_col, -1));
   for (auto it = offd_map.begin(); it != offd_map.end(); ++it)
      it->second = offd_num_cols++;
   A = hypre_ParCSRMatrixCreate(comm, glob_nrows, glob_ncols,
                                row_starts, col_starts, offd_num_cols,
                                diag_nnz, offd_nnz);
   hypre_ParCSRMatrixInitialize(A);
   HYPRE_Int *diag_i, *diag_j, *offd_i, *offd_j;
   real_t *diag_data, *offd_data;
   diag_i = A->diag->i;
   diag_j = A->diag->j;
   diag_data = A->diag->data;
   offd_i = A->offd->i;
   offd_j = A->offd->j;
   offd_data = A->offd->data;
   offd_col_map = A->col_map_offd;
   diag_nnz = offd_nnz = 0;
   for (HYPRE_Int i = 0, j = 0; i < nrows; i++)
      diag_i[i] = diag_nnz;
      offd_i[i] = offd_nnz;
      for (HYPRE_Int j_end = I[i+1]; j < j_end; j++)
         if (my_col_start <= glob_col && glob_col < my_col_end)
            diag_j[diag_nnz] = glob_col - my_col_start;
            diag_data[diag_nnz] = data[j];
            offd_j[offd_nnz] = offd_map[glob_col];
            offd_data[offd_nnz] = data[j];
   diag_i[nrows] = diag_nnz;
   offd_i[nrows] = offd_nnz;
   for (auto it = offd_map.begin(); it != offd_map.end(); ++it)
      offd_col_map[it->second] = it->first;
   hypre_ParCSRMatrixSetNumNonzeros(A);
   hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
#if MFEM_HYPRE_VERSION > 22200
   mfem_hypre_TFree_host(row_starts);
   mfem_hypre_TFree_host(col_starts);
   hypre_MatvecCommPkgCreate(A);
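// Added explanatory comment: each global column index is classified as either
// "diag" (locally owned, stored shifted by my_col_start) or "offd" (mapped via
// offd_map to a compressed local column id); offd_col_map then records the
// inverse map from compressed id back to the global column index.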
   hypre_ParCSRMatrix *Ph = static_cast<hypre_ParCSRMatrix *>(P);
   A = hypre_ParCSRMatrixCompleteClone(Ph);
   hypre_ParCSRMatrixCopy(Ph, A, 1);
   hypre_ParCSRMatrixSetNumNonzeros(A);
   hypre_MatvecCommPkgCreate(A);
   MFEM_ASSERT(diagOwner < 0 && offdOwner < 0 && colMapOwner == -1, "");
   MFEM_ASSERT(diagOwner == offdOwner, "");
   MFEM_ASSERT(ParCSROwner, "");
   hypre_ParCSRMatrix *R = A;
#ifdef HYPRE_USING_GPU
   ParCSROwner = false;
   colMapOwner = colmap;
#if MFEM_HYPRE_VERSION <= 22200
   if (!A || hypre_ParCSRMatrixOwnsRowStarts(A) ||
       (hypre_ParCSRMatrixRowStarts(A) == hypre_ParCSRMatrixColStarts(A) &&
        hypre_ParCSRMatrixOwnsColStarts(A)))
   const int row_starts_size = GetPartitioningArraySize(hypre_ParCSRMatrixComm(A));
   HYPRE_BigInt *old_row_starts = hypre_ParCSRMatrixRowStarts(A);
   for (int i = 0; i < row_starts_size; i++)
      new_row_starts[i] = old_row_starts[i];
   hypre_ParCSRMatrixRowStarts(A) = new_row_starts;
   hypre_ParCSRMatrixOwnsRowStarts(A) = 1;
   if (hypre_ParCSRMatrixColStarts(A) == old_row_starts)
      hypre_ParCSRMatrixColStarts(A) = new_row_starts;
      hypre_ParCSRMatrixOwnsColStarts(A) = 0;
#if MFEM_HYPRE_VERSION <= 22200
   if (!A || hypre_ParCSRMatrixOwnsColStarts(A) ||
       (hypre_ParCSRMatrixRowStarts(A) == hypre_ParCSRMatrixColStarts(A) &&
        hypre_ParCSRMatrixOwnsRowStarts(A)))
   const int col_starts_size = GetPartitioningArraySize(hypre_ParCSRMatrixComm(A));
   HYPRE_BigInt *old_col_starts = hypre_ParCSRMatrixColStarts(A);
   for (int i = 0; i < col_starts_size; i++)
      new_col_starts[i] = old_col_starts[i];
   hypre_ParCSRMatrixColStarts(A) = new_col_starts;
   if (hypre_ParCSRMatrixRowStarts(A) == old_col_starts)
      hypre_ParCSRMatrixRowStarts(A) = new_col_starts;
      hypre_ParCSRMatrixOwnsRowStarts(A) = 1;
      hypre_ParCSRMatrixOwnsColStarts(A) = 0;
   hypre_ParCSRMatrixOwnsColStarts(A) = 1;
   const int size = Height();
#if MFEM_HYPRE_VERSION >= 21800
               "unexpected HypreParMatrix memory location!");
   const HYPRE_Int *A_diag_i = A->diag->i;
   const real_t *A_diag_d = A->diag->data;
   const HYPRE_Int *A_diag_j = A->diag->j;
      diag_hd[i] = A_diag_d[A_diag_i[i]];
                  A_diag_j[A_diag_i[i]] == i,
                  "The first entry in each row must be the diagonal one!");
static void MakeSparseMatrixWrapper(int nrows, int ncols,
                                    HYPRE_Int *I, HYPRE_Int *J, real_t *data,
   SparseMatrix tmp(I, J, data, nrows, ncols, false, false, false);
   for (int i = 0; i <= nrows; i++)
      mI[i] = internal::to_int(I[i]);
   const int nnz = mI[nrows];
   int *mJ = Memory<int>(nnz);
   for (int j = 0; j < nnz; j++)
      mJ[j] = internal::to_int(J[j]);
   SparseMatrix tmp(mI, mJ, data, nrows, ncols, true, false, false);
static void MakeWrapper(const hypre_CSRMatrix *mat,
                        const MemoryIJData &mem,
                        SparseMatrix &wrapper)
   const int nrows = internal::to_int(hypre_CSRMatrixNumRows(mat));
   const int ncols = internal::to_int(hypre_CSRMatrixNumCols(mat));
   const int nnz = internal::to_int(mat->num_nonzeros);
   MakeSparseMatrixWrapper(nrows, ncols,
                           const_cast<HYPRE_Int*>(I),
                           const_cast<HYPRE_Int*>(J),
                           const_cast<real_t*>(data),
   MakeWrapper(A->diag, mem_diag, diag);
   MakeWrapper(A->offd, mem_offd, offd);
   cmap = A->col_map_offd;
   hypre_CSRMatrix *hypre_merged = hypre_MergeDiagAndOffd(A);
#if MFEM_HYPRE_VERSION >= 21600
   hypre_CSRMatrixBigJtoJ(hypre_merged);
   MakeSparseMatrixWrapper(
      internal::to_int(hypre_merged->num_rows),
      internal::to_int(hypre_merged->num_cols),
   merged = merged_tmp;
   hypre_CSRMatrixDestroy(hypre_merged);
                               bool interleaved_rows,
                               bool interleaved_cols) const
   hypre_ParCSRMatrix **hypre_blocks = new hypre_ParCSRMatrix*[nr * nc];
   internal::hypre_ParCSRMatrixSplit(A, nr, nc, hypre_blocks,
                                     interleaved_rows, interleaved_cols);
   for (int i = 0; i < nr; i++)
      for (int j = 0; j < nc; j++)
   delete [] hypre_blocks;
   hypre_ParCSRMatrix * At;
   hypre_ParCSRMatrixTranspose(A, &At, 1);
   hypre_ParCSRMatrixSetNumNonzeros(At);
   if (!hypre_ParCSRMatrixCommPkg(At)) { hypre_MatvecCommPkgCreate(At); }
   hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(At));
#if MFEM_HYPRE_VERSION >= 21800
   hypre_MatvecCommPkgCreate(A);
   hypre_ParCSRMatrix *submat;
   int local_num_vars = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
#ifdef hypre_IntArrayData
   hypre_IntArray *CF_marker;
   CF_marker = hypre_IntArrayCreate(local_num_vars);
   hypre_IntArrayInitialize_v2(CF_marker, HYPRE_MEMORY_HOST);
   hypre_IntArraySetConstantValues(CF_marker, 1);
   for (int j = 0; j < indices.Size(); j++)
      if (indices[j] > local_num_vars)
         MFEM_WARNING("WARNING : " << indices[j] << " > " << local_num_vars);
#ifdef hypre_IntArrayData
      hypre_IntArrayData(CF_marker)[indices[j]] = -1;
      CF_marker[indices[j]] = -1;
#if (MFEM_HYPRE_VERSION > 22300) || (MFEM_HYPRE_VERSION == 22300 && HYPRE_DEVELOP_NUMBER >= 8)
   hypre_BoomerAMGCoarseParms(MPI_COMM_WORLD, local_num_vars, 1, NULL,
                              CF_marker, NULL, cpts_global);
   hypre_BoomerAMGCoarseParms(MPI_COMM_WORLD, local_num_vars, 1, NULL,
                              CF_marker, NULL, &cpts_global);
#ifdef hypre_IntArrayData
   hypre_ParCSRMatrixExtractSubmatrixFC(A, hypre_IntArrayData(CF_marker),
                                        cpts_global, "FF", &submat,
   hypre_ParCSRMatrixExtractSubmatrixFC(A, CF_marker, cpts_global,
                                        "FF", &submat, threshold);
#if (MFEM_HYPRE_VERSION <= 22300) && !(MFEM_HYPRE_VERSION == 22300 && HYPRE_DEVELOP_NUMBER >= 8)
   mfem_hypre_TFree(cpts_global);
#ifdef hypre_IntArrayData
   hypre_IntArrayDestroy(CF_marker);
#if (MFEM_HYPRE_VERSION == 22500 && HYPRE_DEVELOP_NUMBER >= 1) || \
    (MFEM_HYPRE_VERSION > 22500)
#ifdef HYPRE_USING_GPU
   hypre_ParCSRMatrixLocalTranspose(A);
#if (MFEM_HYPRE_VERSION == 22500 && HYPRE_DEVELOP_NUMBER >= 1) || \
    (MFEM_HYPRE_VERSION > 22500)
#ifdef HYPRE_USING_GPU
   hypre_CSRMatrixDestroy(A->diagT);
   hypre_CSRMatrixDestroy(A->offdT);
   return hypre_ParCSRMatrixMatvec(a, A, x, b, y);
   MFEM_ASSERT(x.Size() == Width(), "invalid x.Size() = " << x.Size()
               << ", expected size = " << Width());
   MFEM_ASSERT(y.Size() == Height(), "invalid y.Size() = " << y.Size()
               << ", expected size = " << Height());
   hypre_ParCSRMatrixMatvec(a, A, *X, b, *Y);
   if (!yshallow) { y = *Y; }
   MFEM_ASSERT(x.Size() == Height(), "invalid x.Size() = " << x.Size()
               << ", expected size = " << Height());
   MFEM_ASSERT(y.Size() == Width(), "invalid y.Size() = " << y.Size()
               << ", expected size = " << Width());
   hypre_ParCSRMatrixMatvecT(a, A, *Y, b, *X);
   if (!yshallow) { y = *X; }
   return hypre_ParCSRMatrixMatvec(a, A, (hypre_ParVector *) x, b,
                                   (hypre_ParVector *) y);
   return hypre_ParCSRMatrixMatvecT(a, A, x, b, y);
   MFEM_ASSERT(x.Size() == Width(), "invalid x.Size() = " << x.Size()
               << ", expected size = " << Width());
   MFEM_ASSERT(y.Size() == Height(), "invalid y.Size() = " << y.Size()
               << ", expected size = " << Height());
   internal::hypre_ParCSRMatrixAbsMatvec(A, a, const_cast<real_t*>(x_data),
   MFEM_ASSERT(x.Size() == Height(), "invalid x.Size() = " << x.Size()
               << ", expected size = " << Height());
   MFEM_ASSERT(y.Size() == Width(), "invalid y.Size() = " << y.Size()
               << ", expected size = " << Width());
   internal::hypre_ParCSRMatrixAbsMatvecT(A, a, const_cast<real_t*>(x_data),
   const bool assumed_partition = HYPRE_AssumedPartitionCheck();
   const bool row_starts_given = (row_starts != NULL);
   if (!row_starts_given)
      row_starts = hypre_ParCSRMatrixRowStarts(A);
      MFEM_VERIFY(D.Height() == hypre_CSRMatrixNumRows(A->diag),
                  "the matrix D is NOT compatible with the row starts of"
                  " this HypreParMatrix, row_starts must be given.");
   if (assumed_partition)
      MPI_Comm_rank(GetComm(), &offset);
   int local_num_rows = row_starts[offset+1]-row_starts[offset];
   MFEM_VERIFY(local_num_rows == D.Height(), "the number of rows in D is "
               " not compatible with the given row_starts");
   if (assumed_partition)
      if (row_starts_given)
         global_num_rows = row_starts[2];
         global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
      MPI_Comm_size(GetComm(), &part_size);
      global_num_rows = row_starts[part_size];
   HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
   GetOffd(A_offd, col_map_offd);
      DuplicateAs<HYPRE_BigInt>(row_starts, part_size, false);
      (row_starts == col_starts ? new_row_starts :
       DuplicateAs<HYPRE_BigInt>(col_starts, part_size, false));
      DuplicateAs<HYPRE_BigInt>(col_map_offd, A_offd.Width());
   const bool own_diag_offd = true;
      global_num_rows, hypre_ParCSRMatrixGlobalNumCols(A),
      new_row_starts, new_col_starts,
      DA_diag, DA_offd, new_col_map_offd,
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixSetRowStartsOwner(DA->A, 1);
   hypre_ParCSRMatrixSetColStartsOwner(DA->A, 1);
   mfem_hypre_TFree_host(new_row_starts);
   mfem_hypre_TFree_host(new_col_starts);
   DA->colMapOwner = 1;
   if (hypre_CSRMatrixNumRows(A->diag) != hypre_CSRMatrixNumRows(A->offd))
   if (hypre_CSRMatrixNumRows(A->diag) != diag.Size())
      mfem_error("Note the Vector diag is not of compatible dimensions with A\n");
   real_t *Adiag_data = hypre_CSRMatrixData(A->diag);
   HYPRE_Int *Adiag_i = hypre_CSRMatrixI(A->diag);
   real_t *Aoffd_data = hypre_CSRMatrixData(A->offd);
   HYPRE_Int *Aoffd_i = hypre_CSRMatrixI(A->offd);
   for (int i(0); i < size; ++i)
      for (jj = Adiag_i[i]; jj < Adiag_i[i+1]; ++jj)
         Adiag_data[jj] *= val;
      for (jj = Aoffd_i[i]; jj < Aoffd_i[i+1]; ++jj)
         Aoffd_data[jj] *= val;
   if (hypre_CSRMatrixNumRows(A->diag) != hypre_CSRMatrixNumRows(A->offd))
   if (hypre_CSRMatrixNumRows(A->diag) != diag.Size())
      mfem_error("Note the Vector diag is not of compatible dimensions with A\n");
   real_t *Adiag_data = hypre_CSRMatrixData(A->diag);
   HYPRE_Int *Adiag_i = hypre_CSRMatrixI(A->diag);
   real_t *Aoffd_data = hypre_CSRMatrixData(A->offd);
   HYPRE_Int *Aoffd_i = hypre_CSRMatrixI(A->offd);
   for (int i(0); i < size; ++i)
      mfem_error("HypreParMatrix::InvDiagScale : Division by 0");
      for (jj = Adiag_i[i]; jj < Adiag_i[i+1]; ++jj)
         Adiag_data[jj] *= val;
      for (jj = Aoffd_i[i]; jj < Aoffd_i[i+1]; ++jj)
         Aoffd_data[jj] *= val;
   if (hypre_CSRMatrixNumRows(A->diag) != hypre_CSRMatrixNumRows(A->offd))
   HYPRE_Int size = hypre_CSRMatrixNumRows(A->diag);
   real_t *Adiag_data = hypre_CSRMatrixData(A->diag);
   HYPRE_Int *Adiag_i = hypre_CSRMatrixI(A->diag);
   for (jj = 0; jj < Adiag_i[size]; ++jj)
      Adiag_data[jj] *= s;
   real_t *Aoffd_data = hypre_CSRMatrixData(A->offd);
   HYPRE_Int *Aoffd_i = hypre_CSRMatrixI(A->offd);
   for (jj = 0; jj < Aoffd_i[size]; ++jj)
      Aoffd_data[jj] *= s;
static void get_sorted_rows_cols(const Array<int> &rows_cols,
   for (int i = 0; i < rows_cols.Size(); i++)
      hypre_sorted[i] = rows_cols[i];
      if (i && rows_cols[i-1] > rows_cols[i]) { sorted = false; }
   if (!sorted) { hypre_sorted.Sort(); }
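// Added explanatory comment: the dof list is copied into a HYPRE_Int array and
// sorted only when it was not already sorted; the eliminate helpers below are
// then handed a sorted index list (presumably what the hypre-side routines
// expect).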
   hypre_CSRMatrix * csr_A;
   hypre_CSRMatrix * csr_A_wo_z;
   hypre_ParCSRMatrix * parcsr_A_ptr;
   comm = hypre_ParCSRMatrixComm(A);
   ierr += hypre_ParCSRMatrixGetLocalRange(A,
                                           &row_start, &row_end,
                                           &col_start, &col_end);
   row_starts = hypre_ParCSRMatrixRowStarts(A);
   col_starts = hypre_ParCSRMatrixColStarts(A);
#if MFEM_HYPRE_VERSION <= 22200
   bool old_owns_row = hypre_ParCSRMatrixOwnsRowStarts(A);
   bool old_owns_col = hypre_ParCSRMatrixOwnsColStarts(A);
   HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
   parcsr_A_ptr = hypre_ParCSRMatrixCreate(comm, global_num_rows,
                                           row_starts, col_starts,
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixOwnsRowStarts(parcsr_A_ptr) = old_owns_row;
   hypre_ParCSRMatrixOwnsColStarts(parcsr_A_ptr) = old_owns_col;
   hypre_ParCSRMatrixOwnsRowStarts(A) = 0;
   hypre_ParCSRMatrixOwnsColStarts(A) = 0;
   csr_A = hypre_MergeDiagAndOffd(A);
   csr_A_wo_z = hypre_CSRMatrixDeleteZeros(csr_A, threshold);
   if (csr_A_wo_z == NULL)
      ierr += hypre_CSRMatrixDestroy(csr_A);
   ierr += GenerateDiagAndOffd(csr_A_wo_z, parcsr_A_ptr,
   ierr += hypre_CSRMatrixDestroy(csr_A_wo_z);
   MFEM_VERIFY(ierr == 0, "");
   hypre_ParCSRMatrixSetNumNonzeros(A);
   if (RowAndColStartsAreEqual(comm, row_starts, col_starts))
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); }
   HYPRE_Int old_err = hypre_error_flag;
   hypre_error_flag = 0;
#if MFEM_HYPRE_VERSION < 21400
   HYPRE_Int *diag_I = A->diag->i, *offd_I = A->offd->i;
   real_t *diag_d = A->diag->data, *offd_d = A->offd->data;
   HYPRE_Int local_num_rows = A->diag->num_rows;
   real_t max_l2_row_norm = 0.0;
   for (HYPRE_Int r = 0; r < local_num_rows; r++)
      l2_row_norm = std::hypot(l2_row_norm, row.Norml2());
      max_l2_row_norm = std::max(max_l2_row_norm, l2_row_norm);
   real_t loc_max_l2_row_norm = max_l2_row_norm;
   MPI_Allreduce(&loc_max_l2_row_norm, &max_l2_row_norm, 1,
   threshold = tol * max_l2_row_norm;
#elif MFEM_HYPRE_VERSION < 21800
   HYPRE_Int err_flag = hypre_ParCSRMatrixDropSmallEntries(A, tol);
   MFEM_VERIFY(!err_flag, "error encountered: error code = " << err_flag);
   HYPRE_Int err_flag = hypre_ParCSRMatrixDropSmallEntries(A, tol, 2);
   MFEM_VERIFY(!err_flag, "error encountered: error code = " << err_flag);
   hypre_error_flag = old_err;
   get_sorted_rows_cols(rows_cols, rc_sorted);
   internal::hypre_ParCSRMatrixEliminateAXB(
   get_sorted_rows_cols(rows_cols, rc_sorted);
   hypre_ParCSRMatrix* Ae;
   internal::hypre_ParCSRMatrixEliminateAAe(
   get_sorted_rows_cols(cols, rc_sorted);
   hypre_ParCSRMatrix* Ae;
   internal::hypre_ParCSRMatrixEliminateAAe(
      A, &Ae, rc_sorted.Size(), rc_sorted.GetData(), 1);
   if (rows.Size() > 0)
      get_sorted_rows_cols(rows, r_sorted);
      internal::hypre_ParCSRMatrixEliminateRows(A, r_sorted.Size(),
   Ae.Mult(-1.0, x, 1.0, b);
   if (ess_dof_list.Size() == 0) { return; }
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   real_t *data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *I = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *J = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *I_offd = hypre_CSRMatrixI(A_offd);
   real_t *data_offd = hypre_CSRMatrixData(A_offd);
   for (int i = 0; i < ess_dof_list.Size(); i++)
      int r = ess_dof_list[i];
      b(r) = data[I[r]] * x(r);
      MFEM_ASSERT(I[r] < I[r+1], "empty row found!");
         MFEM_ABORT("the diagonal entry must be the first entry in the row!");
      for (int j = I[r]+1; j < I[r+1]; j++)
            MFEM_ABORT("all off-diagonal entries must be zero!");
      for (int j = I_offd[r]; j < I_offd[r+1]; j++)
         if (data_offd[j] != 0.0)
            MFEM_ABORT("all off-diagonal entries must be zero!");
   hypre_ParCSRMatrix *A_hypre = *this;
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A_hypre);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A_hypre);
   HYPRE_Int diag_nrows = hypre_CSRMatrixNumRows(diag);
   HYPRE_Int offd_ncols = hypre_CSRMatrixNumCols(offd);
   const int n_ess_dofs = ess_dofs.Size();
   const auto ess_dofs_d = ess_dofs.GetMemory().Read(
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int *int_buf_data, *eliminate_row, *eliminate_col;
   eliminate_row = mfem_hypre_CTAlloc(HYPRE_Int, diag_nrows);
   eliminate_col = mfem_hypre_CTAlloc(HYPRE_Int, offd_ncols);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A_hypre);
      hypre_MatvecCommPkgCreate(A_hypre);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A_hypre);
      eliminate_row[i] = 0;
      eliminate_row[ess_dofs_d[i]] = 1;
   HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   HYPRE_Int int_buf_sz = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   int_buf_data = mfem_hypre_CTAlloc(HYPRE_Int, int_buf_sz);
   HYPRE_Int *send_map_elmts;
#if defined(HYPRE_USING_GPU)
      hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);
      send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
      send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
      int k = send_map_elmts[i];
      int_buf_data[i] = eliminate_row[k];
#if defined(HYPRE_USING_GPU)
      comm_handle = hypre_ParCSRCommHandleCreate_v2(
         11, comm_pkg, HYPRE_MEMORY_DEVICE, int_buf_data,
         HYPRE_MEMORY_DEVICE, eliminate_col);
      comm_handle = hypre_ParCSRCommHandleCreate(
         11, comm_pkg, int_buf_data, eliminate_col);
      const auto I = diag->i;
      const auto J = diag->j;
      auto data = diag->data;
      const int idof = ess_dofs_d[i];
      for (auto j = I[idof]; j < I[idof+1]; ++j)
         const auto jdof = J[j];
            for (auto k = I[jdof]; k < I[jdof+1]; ++k)
      const auto I = offd->i;
      auto data = offd->data;
      const int idof = ess_dofs_d[i];
      for (auto j = I[idof]; j < I[idof+1]; ++j)
   hypre_ParCSRCommHandleDestroy(comm_handle);
   mfem_hypre_TFree(int_buf_data);
   mfem_hypre_TFree(eliminate_row);
   const int nrows_offd = hypre_CSRMatrixNumRows(offd);
   const auto I = offd->i;
   const auto J = offd->j;
   auto data = offd->data;
      for (auto j = I[i]; j < I[i+1]; ++j)
         data[j] *= 1 - eliminate_col[J[j]];
   mfem_hypre_TFree(eliminate_col);
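// Added explanatory comment: eliminate_row marks the local essential dofs, the
// comm-pkg exchange (hypre message type 11) propagates those marks to the
// ranks that reference them as off-diagonal columns (eliminate_col), and the
// final loop zeroes every offd entry whose column was marked on a neighbor.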
                               HYPRE_Int offj) const
   hypre_ParCSRMatrixPrintIJ(A, offi, offj, fname.c_str());
void HypreParMatrix::Read(MPI_Comm comm, const std::string &fname)
   HYPRE_ParCSRMatrix A_parcsr;
   HYPRE_Int base_i, base_j;
   hypre_ParCSRMatrixReadIJ(comm, fname.c_str(), &base_i, &base_j, &A_parcsr);
   hypre_ParCSRMatrixSetNumNonzeros(A);
   if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); }
   HYPRE_IJMatrix A_ij;
   HYPRE_IJMatrixRead(fname.c_str(), comm, 5555, &A_ij);
   HYPRE_ParCSRMatrix A_parcsr;
   HYPRE_IJMatrixGetObject(A_ij, (void**) &A_parcsr);
   hypre_ParCSRMatrixSetNumNonzeros(A);
   if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); }
   hypre_ParCSRCommPkg *comm_pkg = A->comm_pkg;
   MPI_Comm comm = A->comm;
   const int tag = 46801;
   MPI_Comm_rank(comm, &myid);
   MPI_Comm_size(comm, &nproc);
   MPI_Recv(&c, 1, MPI_CHAR, myid-1, tag, comm, MPI_STATUS_IGNORE);
   os << "\nHypreParMatrix: hypre_ParCSRCommPkg:\n";
   os << "Rank " << myid << ":\n"
      "   number of sends = " << comm_pkg->num_sends <<
      " (" << sizeof(real_t)*comm_pkg->send_map_starts[comm_pkg->num_sends] <<
      "   number of recvs = " << comm_pkg->num_recvs <<
      " (" << sizeof(real_t)*comm_pkg->recv_vec_starts[comm_pkg->num_recvs] <<
   if (myid != nproc-1)
      MPI_Send(&c, 1, MPI_CHAR, myid+1, tag, comm);
   os << "global number of rows    : " << A->global_num_rows << '\n'
      << "global number of columns : " << A->global_num_cols << '\n'
      << "first row index : " << A->first_row_index << '\n'
      << " last row index : " << A->last_row_index << '\n'
      << "first col diag  : " << A->first_col_diag << '\n'
      << " last col diag  : " << A->last_col_diag << '\n'
      << "number of nonzeros : " << A->num_nonzeros << '\n';
   hypre_CSRMatrix *csr = A->diag;
   const char *csr_name = "diag";
   for (int m = 0; m < 2; m++)
      auto csr_nnz = csr->i[csr->num_rows];
      os << csr_name << " num rows : " << csr->num_rows << '\n'
         << csr_name << " num cols : " << csr->num_cols << '\n'
         << csr_name << " num nnz  : " << csr->num_nonzeros << '\n'
         << csr_name << " i last   : " << csr_nnz
         << (csr_nnz == csr->num_nonzeros ?
             " [good]" : " [** BAD **]") << '\n';
      os << csr_name << " i hash   : " << hf.GetHash() << '\n';
      os << csr_name << " j hash   : ";
      if (csr->j == nullptr)
#if MFEM_HYPRE_VERSION >= 21600
      os << csr_name << " big j hash : ";
      if (csr->big_j == nullptr)
      os << csr_name << " data hash : ";
      if (csr->data == nullptr)
   hf.AppendInts(A->col_map_offd, A->offd->num_cols);
   os << "col map offd hash : " << hf.GetHash() << '\n';
#if MFEM_HYPRE_VERSION >= 21900
   const int ierr = hypre_ParCSRMatrixNormFro(A, &norm_fro);
   MFEM_VERIFY(ierr == 0, "");
   Vector Avec_diag(A->diag->data, A->diag->num_nonzeros);
   Vector Avec_offd(A->offd->data, A->offd->num_nonzeros);
                 MPI_SUM, hypre_ParCSRMatrixComm(A));
   norm_fro = sqrt(normsqr_fro);
   HYPRE_BigInt *A_col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
   int size = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
void HypreParMatrix::Destroy()
   if ( X != NULL ) { delete X; }
   if ( Y != NULL ) { delete Y; }
   if (A == NULL) { return; }
#ifdef HYPRE_USING_GPU
   if (HypreUsingGPU() && ParCSROwner && (diagOwner < 0 || offdOwner < 0))
   MFEM_VERIFY(!(diagOwner < 0 && offdOwner < 0) || diagOwner == offdOwner,
   MemoryClass mc = (diagOwner == -1 || offdOwner == -1) ?
   Write(mc, diagOwner < 0, offdOwner < 0);
   hypre_CSRMatrixI(A->diag) = NULL;
   hypre_CSRMatrixJ(A->diag) = NULL;
   hypre_CSRMatrixData(A->diag) = NULL;
   hypre_CSRMatrixI(A->offd) = NULL;
   hypre_CSRMatrixJ(A->offd) = NULL;
   hypre_CSRMatrixData(A->offd) = NULL;
   if (colMapOwner >= 0)
      if (colMapOwner & 1)
      hypre_ParCSRMatrixColMapOffd(A) = NULL;
   hypre_ParCSRMatrixDestroy(A);
   MFEM_CONTRACT_VAR(own_j);
   MFEM_ASSERT(own_i == own_j, "Inconsistent ownership");
#if MFEM_HYPRE_VERSION >= 21800
   hypre_ParCSRMatrix *C_hypre;
   hypre_ParcsrBdiagInvScal(*A, blocksize, &C_hypre);
   hypre_ParCSRMatrixDropSmallEntries(C_hypre, 1e-15, 1);
   hypre_ParVector *d_hypre;
   hypre_ParvecBdiagInvScal(b_Hypre, blocksize, &d_hypre, *A);
#if MFEM_HYPRE_VERSION < 21400
   hypre_ParCSRMatrix *C_hypre =
   MFEM_VERIFY(C_hypre, "error in hypre_ParCSRMatrixAdd");
   if (!hypre_ParCSRMatrixCommPkg(C_hypre)) { hypre_MatvecCommPkgCreate(C_hypre); }
   hypre_ParCSRMatrix * C = internal::hypre_ParCSRMatrixAdd(*A,*B);
   if (!hypre_ParCSRMatrixCommPkg(C)) { hypre_MatvecCommPkgCreate(C); }
   hypre_ParCSRMatrix *C;
#if MFEM_HYPRE_VERSION <= 22000
   hypre_ParCSRMatrixAdd(alpha, A, beta, B, &C);
   if (!hypre_ParCSRMatrixCommPkg(C)) { hypre_MatvecCommPkgCreate(C); }
   return new HypreParMatrix(C);
HypreParMatrix * ParAdd(const HypreParMatrix *A, const HypreParMatrix *B)
   hypre_ParCSRMatrix *C;
#if MFEM_HYPRE_VERSION <= 22000
   hypre_ParcsrAdd(1.0, *A, 1.0, *B, &C);
   hypre_ParCSRMatrixAdd(1.0, *A, 1.0, *B, &C);
   if (!hypre_ParCSRMatrixCommPkg(C)) { hypre_MatvecCommPkgCreate(C); }
   return new HypreParMatrix(C);
   hypre_ParCSRMatrix * ab;
#ifdef HYPRE_USING_GPU
   ab = hypre_ParCSRMatMat(*A, *B);
   ab = hypre_ParMatmul(*A,*B);
   hypre_ParCSRMatrixSetNumNonzeros(ab);
   if (!hypre_ParCSRMatrixCommPkg(ab)) { hypre_MatvecCommPkgCreate(ab); }
   hypre_ParCSRMatrix * rap;
#ifdef HYPRE_USING_GPU
   hypre_ParCSRMatrix *Q = hypre_ParCSRMatMat(*A,*P);
   const bool keepTranspose = false;
   rap = hypre_ParCSRTMatMatKT(*P,Q,keepTranspose);
   hypre_ParCSRMatrixDestroy(Q);
#if MFEM_HYPRE_VERSION <= 22200
   HYPRE_Int P_owns_its_col_starts =
      hypre_ParCSRMatrixOwnsColStarts((hypre_ParCSRMatrix*)(*P));
   hypre_BoomerAMGBuildCoarseOperator(*P,*A,*P,&rap);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixSetRowStartsOwner(rap,0);
   hypre_ParCSRMatrixSetColStartsOwner(rap,0);
   if (P_owns_its_col_starts)
      hypre_ParCSRMatrixSetColStartsOwner(*P, 1);
   hypre_ParCSRMatrixSetNumNonzeros(rap);
   hypre_ParCSRMatrix * rap;
#ifdef HYPRE_USING_GPU
   hypre_ParCSRMatrix *Q = hypre_ParCSRMatMat(*A,*P);
   rap = hypre_ParCSRTMatMat(*Rt,Q);
   hypre_ParCSRMatrixDestroy(Q);
#if MFEM_HYPRE_VERSION <= 22200
   HYPRE_Int P_owns_its_col_starts =
      hypre_ParCSRMatrixOwnsColStarts((hypre_ParCSRMatrix*)(*P));
   HYPRE_Int Rt_owns_its_col_starts =
      hypre_ParCSRMatrixOwnsColStarts((hypre_ParCSRMatrix*)(*Rt));
   hypre_BoomerAMGBuildCoarseOperator(*Rt,*A,*P,&rap);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixSetRowStartsOwner(rap,0);
   hypre_ParCSRMatrixSetColStartsOwner(rap,0);
   if (P_owns_its_col_starts)
      hypre_ParCSRMatrixSetColStartsOwner(*P, 1);
   if (Rt_owns_its_col_starts)
      hypre_ParCSRMatrixSetColStartsOwner(*Rt, 1);
   hypre_ParCSRMatrixSetNumNonzeros(rap);
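// Usage sketch (illustrative; assumes the mfem::RAP overloads declared in
// hypre.hpp): form a Galerkin coarse operator from a fine operator A and an
// interpolation P, or a Petrov-Galerkin product with a separate restriction Rt.
//
//   HypreParMatrix *Ac  = RAP(&A, &P);        // P^T A P
//   HypreParMatrix *Apg = RAP(&Rt, &A, &P);   // Rt^T A P
//   // ... use Ac / Apg, then:
//   delete Ac; delete Apg;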
                                  const int num_loc, const Array<int> &offsets,
                                  std::vector<int> &all_num_loc, const int numBlocks,
                                  std::vector<std::vector<HYPRE_BigInt>> &blockProcOffsets,
                                  std::vector<HYPRE_BigInt> &procOffsets,
                                  std::vector<std::vector<int>> &procBlockOffsets,
   std::vector<std::vector<int>> all_block_num_loc(numBlocks);
   MPI_Allgather(&num_loc, 1, MPI_INT, all_num_loc.data(), 1, MPI_INT, comm);
   for (int j = 0; j < numBlocks; ++j)
      all_block_num_loc[j].resize(nprocs);
      blockProcOffsets[j].resize(nprocs);
      const int blockNumRows = offsets[j + 1] - offsets[j];
      MPI_Allgather(&blockNumRows, 1, MPI_INT, all_block_num_loc[j].data(), 1,
      blockProcOffsets[j][0] = 0;
      for (int i = 0; i < nprocs - 1; ++i)
         blockProcOffsets[j][i + 1] = blockProcOffsets[j][i]
                                      + all_block_num_loc[j][i];
   for (int i = 0; i < nprocs; ++i)
      globalNum += all_num_loc[i];
      MFEM_VERIFY(globalNum >= 0, "overflow in global size");
      firstLocal += all_num_loc[i];
      procOffsets[i + 1] = procOffsets[i] + all_num_loc[i];
      procBlockOffsets[i].resize(numBlocks);
      procBlockOffsets[i][0] = 0;
      for (int j = 1; j < numBlocks; ++j)
         procBlockOffsets[i][j] = procBlockOffsets[i][j - 1]
                                  + all_block_num_loc[j - 1][i];
   const int numBlockRows = blocks.NumRows();
   const int numBlockCols = blocks.NumCols();
   MFEM_VERIFY(numBlockRows > 0 && numBlockCols > 0,
               "Invalid input to HypreParMatrixFromBlocks");
   if (blockCoeff != NULL)
      MFEM_VERIFY(numBlockRows == blockCoeff->NumRows() &&
                  numBlockCols == blockCoeff->NumCols(),
                  "Invalid input to HypreParMatrixFromBlocks");
   int nonNullBlockRow0 = -1;
   for (int j = 0; j < numBlockCols; ++j)
      if (blocks(0,j) != NULL)
         nonNullBlockRow0 = j;
   MFEM_VERIFY(nonNullBlockRow0 >= 0, "Null row of blocks");
   MPI_Comm comm = blocks(0,nonNullBlockRow0)->GetComm();
   for (int i = 0; i < numBlockRows; ++i)
      for (int j = 0; j < numBlockCols; ++j)
         if (blocks(i,j) != NULL)
            const int nrows = blocks(i,j)->NumRows();
            const int ncols = blocks(i,j)->NumCols();
            if (rowOffsets[i+1] == 0)
               rowOffsets[i+1] = nrows;
               MFEM_VERIFY(rowOffsets[i+1] == nrows,
                           "Inconsistent blocks in HypreParMatrixFromBlocks");
            if (colOffsets[j+1] == 0)
               colOffsets[j+1] = ncols;
               MFEM_VERIFY(colOffsets[j+1] == ncols,
                           "Inconsistent blocks in HypreParMatrixFromBlocks");
      rowOffsets[i+1] += rowOffsets[i];
   for (int j = 0; j < numBlockCols; ++j)
      colOffsets[j+1] += colOffsets[j];
   const int num_loc_rows = rowOffsets[numBlockRows];
   const int num_loc_cols = colOffsets[numBlockCols];
   MPI_Comm_rank(comm, &rank);
   MPI_Comm_size(comm, &nprocs);
   std::vector<int> all_num_loc_rows(nprocs);
   std::vector<int> all_num_loc_cols(nprocs);
   std::vector<HYPRE_BigInt> procRowOffsets(nprocs);
   std::vector<HYPRE_BigInt> procColOffsets(nprocs);
   std::vector<std::vector<HYPRE_BigInt>> blockRowProcOffsets(numBlockRows);
   std::vector<std::vector<HYPRE_BigInt>> blockColProcOffsets(numBlockCols);
   std::vector<std::vector<int>> procBlockRowOffsets(nprocs);
   std::vector<std::vector<int>> procBlockColOffsets(nprocs);
   HYPRE_BigInt first_loc_row, glob_nrows, first_loc_col, glob_ncols;
                         all_num_loc_rows, numBlockRows, blockRowProcOffsets,
                         procRowOffsets, procBlockRowOffsets, first_loc_row,
                         all_num_loc_cols, numBlockCols, blockColProcOffsets,
                         procColOffsets, procBlockColOffsets, first_loc_col,
   std::vector<int> opI(num_loc_rows + 1);
   std::vector<int> cnt(num_loc_rows);
   for (int i = 0; i < num_loc_rows; ++i)
   opI[num_loc_rows] = 0;
   for (int i = 0; i < numBlockRows; ++i)
      for (int j = 0; j < numBlockCols; ++j)
         if (blocks(i, j) == NULL)
            csr_blocks(i, j) = NULL;
            blocks(i, j)->HostRead();
            csr_blocks(i, j) = hypre_MergeDiagAndOffd(*blocks(i, j));
            blocks(i, j)->HypreRead();
            for (int k = 0; k < csr_blocks(i, j)->num_rows; ++k)
               opI[rowOffsets[i] + k + 1] +=
                  csr_blocks(i, j)->i[k + 1] - csr_blocks(i, j)->i[k];
   for (int i = 0; i < num_loc_rows; ++i)
      opI[i + 1] += opI[i];
   const int nnz = opI[num_loc_rows];
   std::vector<HYPRE_BigInt> opJ(nnz);
   std::vector<real_t> data(nnz);
   for (int i = 0; i < numBlockRows; ++i)
      for (int j = 0; j < numBlockCols; ++j)
         if (csr_blocks(i, j) != NULL)
            const int nrows = csr_blocks(i, j)->num_rows;
            const real_t cij = blockCoeff ? (*blockCoeff)(i, j) : 1.0;
#if MFEM_HYPRE_VERSION >= 21600
            const bool usingBigJ = (csr_blocks(i, j)->big_j != NULL);
            for (int k = 0; k < nrows; ++k)
               const int rowg = rowOffsets[i] + k;
               const int nnz_k = csr_blocks(i,j)->i[k+1] - csr_blocks(i,j)->i[k];
               const int osk = csr_blocks(i, j)->i[k];
               for (int l = 0; l < nnz_k; ++l)
#if MFEM_HYPRE_VERSION >= 21600
                  const HYPRE_Int bcol = usingBigJ ?
                                         csr_blocks(i, j)->big_j[osk + l] :
                                         csr_blocks(i, j)->j[osk + l];
                  const HYPRE_Int bcol = csr_blocks(i, j)->j[osk + l];
                  const auto &offs = blockColProcOffsets[j];
                  const int bcolproc =
                     std::upper_bound(offs.begin() + 1, offs.end(), bcol)
                  opJ[opI[rowg] + cnt[rowg]] = procColOffsets[bcolproc] +
                                               procBlockColOffsets[bcolproc][j]
                                               - blockColProcOffsets[j][bcolproc];
                  data[opI[rowg] + cnt[rowg]] = cij * csr_blocks(i, j)->data[osk + l];
   for (int i = 0; i < numBlockRows; ++i)
      for (int j = 0; j < numBlockCols; ++j)
         if (csr_blocks(i, j) != NULL)
            hypre_CSRMatrixDestroy(csr_blocks(i, j));
   MFEM_VERIFY(HYPRE_AssumedPartitionCheck(),
               "only 'assumed partition' mode is supported");
   std::vector<HYPRE_BigInt> rowStarts2(2);
   rowStarts2[0] = first_loc_row;
   rowStarts2[1] = first_loc_row + all_num_loc_rows[rank];
   int square = std::equal(all_num_loc_rows.begin(), all_num_loc_rows.end(),
                           all_num_loc_cols.begin());
      return new HypreParMatrix(comm, num_loc_rows, glob_nrows, glob_ncols,
                                opI.data(), opJ.data(),
   std::vector<HYPRE_BigInt> colStarts2(2);
   colStarts2[0] = first_loc_col;
   colStarts2[1] = first_loc_col + all_num_loc_cols[rank];
      return new HypreParMatrix(comm, num_loc_rows, glob_nrows, glob_ncols,
                                opI.data(), opJ.data(),
   for (int i = 0; i < blocks.NumRows(); ++i)
      for (int j = 0; j < blocks.NumCols(); ++j)
         constBlocks(i, j) = blocks(i, j);
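// Usage sketch (illustrative; assumes the HypreParMatrixFromBlocks overload
// declared in hypre.hpp): assemble one monolithic parallel matrix from a 2x2
// block system, with NULL marking an empty block and an optional per-block
// coefficient array.
//
//   mfem::Array2D<mfem::HypreParMatrix*> blocks(2, 2);
//   blocks(0,0) = &A;   blocks(0,1) = Bt;
//   blocks(1,0) = B;    blocks(1,1) = nullptr;   // empty block allowed
//   mfem::HypreParMatrix *M = mfem::HypreParMatrixFromBlocks(blocks);
//   // ... use M ...
//   delete M;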
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
   real_t *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   real_t *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r));
   for (int i = 0; i < N; i++)
      hypre_ParVectorCopy(f, r);
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);
      (0 == (i % 2)) ? coef = lambda : coef = mu;
      for (HYPRE_Int j = 0; j < num_rows; j++)
         u_data[j] += coef*r_data[j] / max_eig;
                               hypre_ParVector *x0,
                               hypre_ParVector *x1,
                               hypre_ParVector *x2,
                               hypre_ParVector *x3)
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
   real_t *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   real_t *x0_data = hypre_VectorData(hypre_ParVectorLocalVector(x0));
   real_t *x1_data = hypre_VectorData(hypre_ParVectorLocalVector(x1));
   real_t *x2_data = hypre_VectorData(hypre_ParVectorLocalVector(x2));
   real_t *x3_data = hypre_VectorData(hypre_ParVectorLocalVector(x3));
   hypre_ParVectorCopy(u, x0);
   hypre_ParVectorCopy(f, x1);
   hypre_ParCSRMatrixMatvec(-1.0, A, x0, 1.0, x1);
   for (HYPRE_Int i = 0; i < num_rows; i++)
      x1_data[i] /= -max_eig;
   for (HYPRE_Int i = 0; i < num_rows; i++)
      x1_data[i] = x0_data[i] - x1_data[i];
   for (HYPRE_Int i = 0; i < num_rows; i++)
      x3_data[i] = fir_coeffs[0]*x0_data[i] + fir_coeffs[1]*x1_data[i];
   for (int n = 2; n <= poly_order; n++)
      hypre_ParVectorCopy(f, x2);
      hypre_ParCSRMatrixMatvec(-1.0, A, x1, 1.0, x2);
      for (HYPRE_Int i = 0; i < num_rows; i++)
         x2_data[i] /= -max_eig;
      for (HYPRE_Int i = 0; i < num_rows; i++)
         x2_data[i] = (x1_data[i]-x0_data[i]) + (x1_data[i]-2*x2_data[i]);
         x3_data[i] += fir_coeffs[n]*x2_data[i];
         x0_data[i] = x1_data[i];
         x1_data[i] = x2_data[i];
   for (HYPRE_Int i = 0; i < num_rows; i++)
      u_data[i] = x3_data[i];
   B = X = V = Z = NULL;
                             int relax_times_, real_t relax_weight_,
                             real_t omega_, int poly_order_,
                             real_t poly_fraction_, int eig_est_cg_iter_)
   B = X = V = Z = NULL;
   type = static_cast<int>(type_);
                                 int eig_est_cg_iter_)
   if (!strcmp(name, "Rectangular")) { a = 1.0,  b = 0.0,  c = 0.0;  }
   if (!strcmp(name, "Hanning"))     { a = 0.5,  b = 0.5,  c = 0.0;  }
   if (!strcmp(name, "Hamming"))     { a = 0.54, b = 0.46, c = 0.0;  }
   if (!strcmp(name, "Blackman"))    { a = 0.42, b = 0.50, c = 0.08; }
   mfem_error("HypreSmoother::SetWindowByName : name not recognized!");
   mfem_error("HypreSmoother::SetOperator : not HypreParMatrix!");
   if (B) { delete B; }
   if (X) { delete X; }
   if (V) { delete V; }
   if (Z) { delete Z; }
   A->Mult(ones, diag);
   d_l1_norms[i] = std::abs(d_l1_norms[i]);
#if MFEM_HYPRE_VERSION < 22100
#elif defined(HYPRE_USING_GPU)
#if MFEM_HYPRE_VERSION <= 22200
   else if (type == 1001 || type == 1002)
#if MFEM_HYPRE_VERSION <= 22200
      window_coeffs[i] = a + b*cos(t) + c*cos(2*t);
   real_t theta_pb = acos(1.0 - 0.5*k_pb);
   cheby_coeffs[0] = (theta_pb + sigma)/M_PI;
      cheby_coeffs[i] = 2.0*sin(t)/(i*M_PI);
      fir_coeffs[i] = window_coeffs[i]*cheby_coeffs[i];
   delete[] window_coeffs;
   delete[] cheby_coeffs;
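// Added explanatory note (an interpretation of the code above, not from the
// original): the FIR smoother coefficients are a windowed low-pass filter,
// fir_coeffs[i] = w(t_i) * h_i, where w(t) = a + b*cos(t) + c*cos(2*t) is the
// window chosen in SetWindowByName, and h_0 = (theta_pb + sigma)/pi,
// h_i = 2*sin(t_i)/(i*pi) for i > 0, with cutoff angle
// theta_pb = acos(1 - k_pb/2).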
   mfem_error("HypreSmoother::Mult (...) : HypreParMatrix A is missing");
   HYPRE_ParCSRDiagScale(NULL, *A, b, x);
   hypre_ParVectorSetConstantValues(x, 0.0);
   else if (type == 1002)
   int hypre_type = type;
   if (type == 5) { hypre_type = 1; }
   hypre_ParCSRRelax(*A, b, hypre_type,
   hypre_ParCSRRelax(*A, b, hypre_type,
   MFEM_ASSERT(b.Size() == NumCols(), "");
   mfem_error("HypreSmoother::Mult (...) : HypreParMatrix A is missing");
                          A -> GetGlobalNumRows(),
                          A -> GetRowStarts());
                          A -> GetGlobalNumCols(),
                          A -> GetColStarts());
   if (!xshallow) { x = *X; }
   mfem_error("HypreSmoother::MultTranspose (...) : undefined!\n");
   if (B) { delete B; }
   if (X) { delete X; }
   if (V) { delete V; }
   if (Z) { delete Z; }
   if (X0) { delete X0; }
   if (X1) { delete X1; }
   : Solver(A_->Height(), A_->Width())
   MFEM_ASSERT(b.Size() == NumCols(), "");
4032 MFEM_VERIFY(
A != NULL,
"HypreParMatrix A is missing");
4082 MFEM_VERIFY(
A != NULL,
"HypreParMatrix A is missing");
4084 HYPRE_Int err_flag =
SetupFcn()(*
this, *
A,
b, x);
4088 { MFEM_WARNING(
"Error during setup! Error code: " << err_flag); }
4092 MFEM_VERIFY(!err_flag,
"Error during setup! Error code: " << err_flag);
4094 hypre_error_flag = 0;
4102 if (!x_shallow) { x = *X; }
4110 mfem_error("HypreSolver::Mult (...) : HypreParMatrix A is missing");
4117 hypre_ParVectorSetConstantValues(x, 0.0);
4129 { MFEM_WARNING("Error during solve! Error code: " << err_flag); }
4133 MFEM_VERIFY(!err_flag, "Error during solve! Error code: " << err_flag);
4135 hypre_error_flag = 0;
4142 if (!x_shallow) { x = *X; }
4147 if (B) { delete B; }
4148 if (X) { delete X; }
4158 HYPRE_ParCSRPCGCreate(comm, &pcg_solver);
4167 HYPRE_ParCSRMatrixGetComm(*A, &comm);
4169 HYPRE_ParCSRPCGCreate(comm, &pcg_solver);
4175 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4196 HYPRE_PCGSetTol(pcg_solver, tol);
4201 HYPRE_PCGSetAbsoluteTol(pcg_solver, atol);
4206 HYPRE_PCGSetMaxIter(pcg_solver, max_iter);
4211 HYPRE_PCGSetLogging(pcg_solver, logging);
4216 HYPRE_ParCSRPCGSetPrintLevel(pcg_solver, print_lvl);
4221 precond = &precond_;
4223 HYPRE_ParCSRPCGSetPrecond(pcg_solver,
4231 HYPRE_PCGSetTwoNorm(pcg_solver, 1);
4232 if (res_frequency > 0)
4234 HYPRE_PCGSetRecomputeResidualP(pcg_solver, res_frequency);
4238 HYPRE_PCGSetResidualTol(pcg_solver, rtol);
4245 HYPRE_Int time_index = 0;
4246 HYPRE_Int num_iterations;
4249 HYPRE_Int print_level;
4251 HYPRE_PCGGetPrintLevel(pcg_solver, &print_level);
4252 HYPRE_ParCSRPCGSetPrintLevel(pcg_solver, print_level%3);
4254 HYPRE_ParCSRMatrixGetComm(*A, &comm);
4259 hypre_ParVectorSetConstantValues(x, 0.0);
4267 if (print_level > 0 && print_level < 3)
4269 time_index = hypre_InitializeTiming("PCG Setup");
4270 hypre_BeginTiming(time_index);
4273 HYPRE_ParCSRPCGSetup(pcg_solver, *A, b, x);
4276 if (print_level > 0 && print_level < 3)
4278 hypre_EndTiming(time_index);
4279 hypre_PrintTiming("Setup phase times", comm);
4280 hypre_FinalizeTiming(time_index);
4281 hypre_ClearTiming();
4285 if (print_level > 0 && print_level < 3)
4287 time_index = hypre_InitializeTiming("PCG Solve");
4288 hypre_BeginTiming(time_index);
4291 HYPRE_ParCSRPCGSolve(pcg_solver, *A, b, x);
4293 if (print_level > 0)
4295 if (print_level < 3)
4297 hypre_EndTiming(time_index);
4298 hypre_PrintTiming("Solve phase times", comm);
4299 hypre_FinalizeTiming(time_index);
4300 hypre_ClearTiming();
4303 HYPRE_ParCSRPCGGetNumIterations(pcg_solver, &num_iterations);
4304 HYPRE_ParCSRPCGGetFinalRelativeResidualNorm(pcg_solver,
4307 MPI_Comm_rank(comm, &myid);
4311 mfem::out << "PCG Iterations = " << num_iterations << endl
4312 << "Final PCG Relative Residual Norm = " << final_res_norm
4316 HYPRE_ParCSRPCGSetPrintLevel(pcg_solver, print_level);
4321 HYPRE_ParCSRPCGDestroy(pcg_solver);
4329 HYPRE_ParCSRGMRESCreate(comm, &gmres_solver);
4330 SetDefaultOptions();
4340 HYPRE_ParCSRMatrixGetComm(*A, &comm);
4342 HYPRE_ParCSRGMRESCreate(comm, &gmres_solver);
4343 SetDefaultOptions();
4346void HypreGMRES::SetDefaultOptions()
4352 HYPRE_ParCSRGMRESSetKDim(gmres_solver, k_dim);
4353 HYPRE_ParCSRGMRESSetMaxIter(gmres_solver, max_iter);
4354 HYPRE_ParCSRGMRESSetTol(gmres_solver, tol);
4360 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4381 HYPRE_GMRESSetTol(gmres_solver, tol);
4386 HYPRE_GMRESSetAbsoluteTol(gmres_solver, tol);
4391 HYPRE_GMRESSetMaxIter(gmres_solver, max_iter);
4396 HYPRE_GMRESSetKDim(gmres_solver, k_dim);
4401 HYPRE_GMRESSetLogging(gmres_solver, logging);
4406 HYPRE_GMRESSetPrintLevel(gmres_solver, print_lvl);
4411 precond = &precond_;
4413 HYPRE_ParCSRGMRESSetPrecond(gmres_solver,
4422 HYPRE_Int time_index = 0;
4423 HYPRE_Int num_iterations;
4426 HYPRE_Int print_level;
4428 HYPRE_GMRESGetPrintLevel(gmres_solver, &print_level);
4430 HYPRE_ParCSRMatrixGetComm(*A, &comm);
4435 hypre_ParVectorSetConstantValues(x, 0.0);
4443 if (print_level > 0)
4445 time_index = hypre_InitializeTiming("GMRES Setup");
4446 hypre_BeginTiming(time_index);
4449 HYPRE_ParCSRGMRESSetup(gmres_solver, *A, b, x);
4452 if (print_level > 0)
4454 hypre_EndTiming(time_index);
4455 hypre_PrintTiming("Setup phase times", comm);
4456 hypre_FinalizeTiming(time_index);
4457 hypre_ClearTiming();
4461 if (print_level > 0)
4463 time_index = hypre_InitializeTiming("GMRES Solve");
4464 hypre_BeginTiming(time_index);
4467 HYPRE_ParCSRGMRESSolve(gmres_solver, *A, b, x);
4469 if (print_level > 0)
4471 hypre_EndTiming(time_index);
4472 hypre_PrintTiming("Solve phase times", comm);
4473 hypre_FinalizeTiming(time_index);
4474 hypre_ClearTiming();
4476 HYPRE_ParCSRGMRESGetNumIterations(gmres_solver, &num_iterations);
4477 HYPRE_ParCSRGMRESGetFinalRelativeResidualNorm(gmres_solver,
4480 MPI_Comm_rank(comm, &myid);
4484 mfem::out << "GMRES Iterations = " << num_iterations << endl
4485 << "Final GMRES Relative Residual Norm = " << final_res_norm
4493 HYPRE_ParCSRGMRESDestroy(gmres_solver);
4501 HYPRE_ParCSRFlexGMRESCreate(comm, &fgmres_solver);
4502 SetDefaultOptions();
4512 HYPRE_ParCSRMatrixGetComm(*A, &comm);
4514 HYPRE_ParCSRFlexGMRESCreate(comm, &fgmres_solver);
4515 SetDefaultOptions();
4518void HypreFGMRES::SetDefaultOptions()
4524 HYPRE_ParCSRFlexGMRESSetKDim(fgmres_solver, k_dim);
4525 HYPRE_ParCSRFlexGMRESSetMaxIter(fgmres_solver, max_iter);
4526 HYPRE_ParCSRFlexGMRESSetTol(fgmres_solver, tol);
4532 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4553 HYPRE_ParCSRFlexGMRESSetTol(fgmres_solver, tol);
4558 HYPRE_ParCSRFlexGMRESSetMaxIter(fgmres_solver, max_iter);
4563 HYPRE_ParCSRFlexGMRESSetKDim(fgmres_solver, k_dim);
4568 HYPRE_ParCSRFlexGMRESSetLogging(fgmres_solver, logging);
4573 HYPRE_ParCSRFlexGMRESSetPrintLevel(fgmres_solver, print_lvl);
4578 precond = &precond_;
4579 HYPRE_ParCSRFlexGMRESSetPrecond(fgmres_solver,
4588 HYPRE_Int time_index = 0;
4589 HYPRE_Int num_iterations;
4592 HYPRE_Int print_level;
4594 HYPRE_FlexGMRESGetPrintLevel(fgmres_solver, &print_level);
4596 HYPRE_ParCSRMatrixGetComm(*A, &comm);
4601 hypre_ParVectorSetConstantValues(x, 0.0);
4609 if (print_level > 0)
4611 time_index = hypre_InitializeTiming("FGMRES Setup");
4612 hypre_BeginTiming(time_index);
4615 HYPRE_ParCSRFlexGMRESSetup(fgmres_solver, *A, b, x);
4618 if (print_level > 0)
4620 hypre_EndTiming(time_index);
4621 hypre_PrintTiming("Setup phase times", comm);
4622 hypre_FinalizeTiming(time_index);
4623 hypre_ClearTiming();
4627 if (print_level > 0)
4629 time_index = hypre_InitializeTiming("FGMRES Solve");
4630 hypre_BeginTiming(time_index);
4633 HYPRE_ParCSRFlexGMRESSolve(fgmres_solver, *A, b, x);
4635 if (print_level > 0)
4637 hypre_EndTiming(time_index);
4638 hypre_PrintTiming("Solve phase times", comm);
4639 hypre_FinalizeTiming(time_index);
4640 hypre_ClearTiming();
4642 HYPRE_ParCSRFlexGMRESGetNumIterations(fgmres_solver, &num_iterations);
4643 HYPRE_ParCSRFlexGMRESGetFinalRelativeResidualNorm(fgmres_solver,
4646 MPI_Comm_rank(comm, &myid);
4650 mfem::out << "FGMRES Iterations = " << num_iterations << endl
4651 << "Final FGMRES Relative Residual Norm = " << final_res_norm
4659 HYPRE_ParCSRFlexGMRESDestroy(fgmres_solver);
4666 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4683 HYPRE_ParaSailsCreate(comm, &sai_precond);
4684 SetDefaultOptions();
4691 HYPRE_ParCSRMatrixGetComm(A, &comm);
4693 HYPRE_ParaSailsCreate(comm, &sai_precond);
4694 SetDefaultOptions();
4697void HypreParaSails::SetDefaultOptions()
4699 int sai_max_levels = 1;
4700 real_t sai_threshold = 0.1;
4703 real_t sai_loadbal = 0.0;
4705 int sai_logging = 1;
4707 HYPRE_ParaSailsSetParams(sai_precond, sai_threshold, sai_max_levels);
4708 HYPRE_ParaSailsSetFilter(sai_precond, sai_filter);
4709 HYPRE_ParaSailsSetSym(sai_precond, sai_sym);
4710 HYPRE_ParaSailsSetLoadbal(sai_precond, sai_loadbal);
4711 HYPRE_ParaSailsSetReuse(sai_precond, sai_reuse);
4712 HYPRE_ParaSailsSetLogging(sai_precond, sai_logging);
4715void HypreParaSails::ResetSAIPrecond(MPI_Comm comm)
4717 HYPRE_Int sai_max_levels;
4718 HYPRE_Real sai_threshold;
4719 HYPRE_Real sai_filter;
4721 HYPRE_Real sai_loadbal;
4722 HYPRE_Int sai_reuse;
4723 HYPRE_Int sai_logging;
4726 HYPRE_ParaSailsGetNlevels(sai_precond, &sai_max_levels);
4727 HYPRE_ParaSailsGetThresh(sai_precond, &sai_threshold);
4728 HYPRE_ParaSailsGetFilter(sai_precond, &sai_filter);
4729 HYPRE_ParaSailsGetSym(sai_precond, &sai_sym);
4730 HYPRE_ParaSailsGetLoadbal(sai_precond, &sai_loadbal);
4731 HYPRE_ParaSailsGetReuse(sai_precond, &sai_reuse);
4732 HYPRE_ParaSailsGetLogging(sai_precond, &sai_logging);
4734 HYPRE_ParaSailsDestroy(sai_precond);
4735 HYPRE_ParaSailsCreate(comm, &sai_precond);
4737 HYPRE_ParaSailsSetParams(sai_precond, sai_threshold, sai_max_levels);
4738 HYPRE_ParaSailsSetFilter(sai_precond, sai_filter);
4739 HYPRE_ParaSailsSetSym(sai_precond, sai_sym);
4740 HYPRE_ParaSailsSetLoadbal(sai_precond, sai_loadbal);
4741 HYPRE_ParaSailsSetReuse(sai_precond, sai_reuse);
4742 HYPRE_ParaSailsSetLogging(sai_precond, sai_logging);
4748 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4753 HYPRE_ParCSRMatrixGetComm(*A, &comm);
4754 ResetSAIPrecond(comm);
4771 HYPRE_ParaSailsSetParams(sai_precond, threshold, max_levels);
4776 HYPRE_ParaSailsSetFilter(sai_precond, filter);
4781 HYPRE_ParaSailsSetSym(sai_precond, sym);
4786 HYPRE_ParaSailsSetLoadbal(sai_precond, loadbal);
4791 HYPRE_ParaSailsSetReuse(sai_precond, reuse);
4796 HYPRE_ParaSailsSetLogging(sai_precond, logging);
4801 HYPRE_ParaSailsDestroy(sai_precond);
4807 HYPRE_EuclidCreate(comm, &euc_precond);
4808 SetDefaultOptions();
4815 HYPRE_ParCSRMatrixGetComm(A, &comm);
4817 HYPRE_EuclidCreate(comm, &euc_precond);
4818 SetDefaultOptions();
4821void HypreEuclid::SetDefaultOptions()
4829 HYPRE_EuclidSetLevel(euc_precond, euc_level);
4830 HYPRE_EuclidSetStats(euc_precond, euc_stats);
4831 HYPRE_EuclidSetMem(euc_precond, euc_mem);
4832 HYPRE_EuclidSetBJ(euc_precond, euc_bj);
4833 HYPRE_EuclidSetRowScale(euc_precond, euc_ro_sc);
4838 HYPRE_EuclidSetLevel(euc_precond, level);
4843 HYPRE_EuclidSetStats(euc_precond, stats);
4848 HYPRE_EuclidSetMem(euc_precond, mem);
4853 HYPRE_EuclidSetBJ(euc_precond, bj);
4858 HYPRE_EuclidSetRowScale(euc_precond, row_scale);
4861void HypreEuclid::ResetEuclidPrecond(MPI_Comm comm)
4865 HYPRE_EuclidDestroy(euc_precond);
4866 HYPRE_EuclidCreate(comm, &euc_precond);
4868 SetDefaultOptions();
4874 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4879 HYPRE_ParCSRMatrixGetComm(*new_A, &comm);
4880 ResetEuclidPrecond(comm);
4897 HYPRE_EuclidDestroy(euc_precond);
4901#if MFEM_HYPRE_VERSION >= 21900
4904 HYPRE_ILUCreate(&ilu_precond);
4905 SetDefaultOptions();
4908void HypreILU::SetDefaultOptions()
4911 HYPRE_Int ilu_type = 0;
4912 HYPRE_ILUSetType(ilu_precond, ilu_type);
4915 HYPRE_Int max_iter = 1;
4916 HYPRE_ILUSetMaxIter(ilu_precond, max_iter);
4919 HYPRE_Real tol = 0.0;
4920 HYPRE_ILUSetTol(ilu_precond, tol);
4923 HYPRE_Int lev_fill = 1;
4924 HYPRE_ILUSetLevelOfFill(ilu_precond, lev_fill);
4927 HYPRE_Int reorder_type = 1;
4928 HYPRE_ILUSetLocalReordering(ilu_precond, reorder_type);
4931 HYPRE_Int print_level = 0;
4932 HYPRE_ILUSetPrintLevel(ilu_precond, print_level);
4935void HypreILU::ResetILUPrecond()
4939 HYPRE_ILUDestroy(ilu_precond);
4941 HYPRE_ILUCreate(&ilu_precond);
4942 SetDefaultOptions();
4947 HYPRE_ILUSetLevelOfFill(ilu_precond, lev_fill);
4952 HYPRE_ILUSetType(ilu_precond, ilu_type);
4957 HYPRE_ILUSetMaxIter(ilu_precond, max_iter);
4962 HYPRE_ILUSetTol(ilu_precond, tol);
4967 HYPRE_ILUSetLocalReordering(ilu_precond, reorder_type);
4972 HYPRE_ILUSetPrintLevel(ilu_precond, print_level);
4978 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4980 if (A) { ResetILUPrecond(); }
4996 HYPRE_ILUDestroy(ilu_precond);
5003 HYPRE_BoomerAMGCreate(&amg_precond);
5004 SetDefaultOptions();
5009 HYPRE_BoomerAMGCreate(&amg_precond);
5010 SetDefaultOptions();
5013void HypreBoomerAMG::SetDefaultOptions()
5016 int coarsen_type, agg_levels, interp_type, Pmax, relax_type, relax_sweeps,
5017 print_level, max_levels;
5059 HYPRE_BoomerAMGSetCoarsenType(amg_precond, coarsen_type);
5060 HYPRE_BoomerAMGSetAggNumLevels(amg_precond, agg_levels);
5061 HYPRE_BoomerAMGSetRelaxType(amg_precond, relax_type);
5064 HYPRE_BoomerAMGSetNumSweeps(amg_precond, relax_sweeps);
5065 HYPRE_BoomerAMGSetStrongThreshold(amg_precond, theta);
5066 HYPRE_BoomerAMGSetInterpType(amg_precond, interp_type);
5067 HYPRE_BoomerAMGSetPMaxElmts(amg_precond, Pmax);
5068 HYPRE_BoomerAMGSetPrintLevel(amg_precond, print_level);
5069 HYPRE_BoomerAMGSetMaxLevels(amg_precond, max_levels);
5072 HYPRE_BoomerAMGSetMaxIter(amg_precond, 1);
5073 HYPRE_BoomerAMGSetTol(amg_precond, 0.0);
5076void HypreBoomerAMG::ResetAMGPrecond()
5078 HYPRE_Int coarsen_type;
5079 HYPRE_Int agg_levels;
5080 HYPRE_Int relax_type;
5081 HYPRE_Int relax_sweeps;
5083 HYPRE_Int interp_type;
5085 HYPRE_Int print_level;
5086 HYPRE_Int max_levels;
5088 HYPRE_Int nrbms = rbms.Size();
5090 HYPRE_Int nodal_diag;
5091 HYPRE_Int relax_coarse;
5092 HYPRE_Int interp_vec_variant;
5094 HYPRE_Int smooth_interp_vectors;
5095 HYPRE_Int interp_refine;
5097 hypre_ParAMGData *amg_data = (hypre_ParAMGData *)amg_precond;
5100 HYPRE_BoomerAMGGetCoarsenType(amg_precond, &coarsen_type);
5101 agg_levels = hypre_ParAMGDataAggNumLevels(amg_data);
5102 relax_type = hypre_ParAMGDataUserRelaxType(amg_data);
5103 relax_sweeps = hypre_ParAMGDataUserNumSweeps(amg_data);
5104 HYPRE_BoomerAMGGetStrongThreshold(amg_precond, &theta);
5105 hypre_BoomerAMGGetInterpType(amg_precond, &interp_type);
5106 HYPRE_BoomerAMGGetPMaxElmts(amg_precond, &Pmax);
5107 HYPRE_BoomerAMGGetPrintLevel(amg_precond, &print_level);
5108 HYPRE_BoomerAMGGetMaxLevels(amg_precond, &max_levels);
5109 HYPRE_BoomerAMGGetNumFunctions(amg_precond, &dim);
5112 nodal = hypre_ParAMGDataNodal(amg_data);
5113 nodal_diag = hypre_ParAMGDataNodalDiag(amg_data);
5114 HYPRE_BoomerAMGGetCycleRelaxType(amg_precond, &relax_coarse, 3);
5115 interp_vec_variant = hypre_ParAMGInterpVecVariant(amg_data);
5116 q_max = hypre_ParAMGInterpVecQMax(amg_data);
5117 smooth_interp_vectors = hypre_ParAMGSmoothInterpVectors(amg_data);
5118 interp_refine = hypre_ParAMGInterpRefine(amg_data);
5121 HYPRE_BoomerAMGDestroy(amg_precond);
5122 HYPRE_BoomerAMGCreate(&amg_precond);
5124 HYPRE_BoomerAMGSetCoarsenType(amg_precond, coarsen_type);
5125 HYPRE_BoomerAMGSetAggNumLevels(amg_precond, agg_levels);
5126 HYPRE_BoomerAMGSetRelaxType(amg_precond, relax_type);
5127 HYPRE_BoomerAMGSetNumSweeps(amg_precond, relax_sweeps);
5128 HYPRE_BoomerAMGSetMaxLevels(amg_precond, max_levels);
5129 HYPRE_BoomerAMGSetTol(amg_precond, 0.0);
5130 HYPRE_BoomerAMGSetMaxIter(amg_precond, 1);
5131 HYPRE_BoomerAMGSetStrongThreshold(amg_precond, theta);
5132 HYPRE_BoomerAMGSetInterpType(amg_precond, interp_type);
5133 HYPRE_BoomerAMGSetPMaxElmts(amg_precond, Pmax);
5134 HYPRE_BoomerAMGSetPrintLevel(amg_precond, print_level);
5135 HYPRE_BoomerAMGSetNumFunctions(amg_precond, dim);
5138 HYPRE_BoomerAMGSetNodal(amg_precond, nodal);
5139 HYPRE_BoomerAMGSetNodalDiag(amg_precond, nodal_diag);
5140 HYPRE_BoomerAMGSetCycleRelaxType(amg_precond, relax_coarse, 3);
5141 HYPRE_BoomerAMGSetInterpVecVariant(amg_precond, interp_vec_variant);
5142 HYPRE_BoomerAMGSetInterpVecQMax(amg_precond, q_max);
5143 HYPRE_BoomerAMGSetSmoothInterpVectors(amg_precond, smooth_interp_vectors);
5144 HYPRE_BoomerAMGSetInterpRefine(amg_precond, interp_refine);
5146 HYPRE_BoomerAMGSetInterpVectors(amg_precond, rbms.Size(), rbms.GetData());
5153 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
5155 if (A) { ResetAMGPrecond(); }
5171 HYPRE_BoomerAMGSetNumFunctions(amg_precond, dim);
5179 HYPRE_Int *h_mapping = mfem_hypre_CTAlloc_host(HYPRE_Int, height);
5181 MFEM_VERIFY(height % dim == 0, "Ordering does not work as claimed!");
5183 for (int i = 0; i < dim; ++i)
5185 for (int j = 0; j < h_nnodes; ++j)
5194 HYPRE_Int *mapping = nullptr;
5195#if defined(hypre_IntArrayData) && defined(HYPRE_USING_GPU)
5198 mapping = mfem_hypre_CTAlloc(HYPRE_Int, height);
5199 hypre_TMemcpy(mapping, h_mapping, HYPRE_Int, height,
5200 HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
5201 mfem_hypre_TFree_host(h_mapping);
5206 mapping = h_mapping;
5211 HYPRE_BoomerAMGSetDofFunc(amg_precond, mapping);
5215 HYPRE_BoomerAMGSetAggNumLevels(amg_precond, 0);
5216 HYPRE_BoomerAMGSetStrongThreshold(amg_precond, 0.5);
5222 y = 0.0; y(0) = x(1); y(1) = -x(0);
5224static void func_ryz(const Vector &x, Vector &y)
5226 y = 0.0; y(1) = x(2); y(2) = -x(1);
5228static void func_rzx(const Vector &x, Vector &y)
5230 y = 0.0; y(2) = x(0); y(0) = -x(2);
5233void HypreBoomerAMG::RecomputeRBMs()
5236 Array<HypreParVector*> gf_rbms;
5239 for (int i = 0; i < rbms.Size(); i++)
5241 HYPRE_ParVectorDestroy(rbms[i]);
5248 VectorFunctionCoefficient coeff_rxy(2, func_rxy);
5250 ParGridFunction rbms_rxy(fespace);
5251 rbms_rxy.ProjectCoefficient(coeff_rxy);
5254 gf_rbms.SetSize(nrbms);
5256 rbms_rxy.GetTrueDofs(*gf_rbms[0]);
5262 VectorFunctionCoefficient coeff_rxy(3, func_rxy);
5263 VectorFunctionCoefficient coeff_ryz(3, func_ryz);
5264 VectorFunctionCoefficient coeff_rzx(3, func_rzx);
5266 ParGridFunction rbms_rxy(fespace);
5267 ParGridFunction rbms_ryz(fespace);
5268 ParGridFunction rbms_rzx(fespace);
5269 rbms_rxy.ProjectCoefficient(coeff_rxy);
5270 rbms_ryz.ProjectCoefficient(coeff_ryz);
5271 rbms_rzx.ProjectCoefficient(coeff_rzx);
5274 gf_rbms.SetSize(nrbms);
5278 rbms_rxy.GetTrueDofs(*gf_rbms[0]);
5279 rbms_ryz.GetTrueDofs(*gf_rbms[1]);
5280 rbms_rzx.GetTrueDofs(*gf_rbms[2]);
5289 for (int i = 0; i < nrbms; i++)
5291 rbms[i] = gf_rbms[i]->StealParVector();
5297 bool interp_refine_)
5299#ifdef HYPRE_USING_GPU
5302 MFEM_ABORT("this method is not supported in hypre built with GPU support");
5307 this->fespace = fespace_;
5317 int relax_coarse = 8;
5320 int interp_vec_variant = 2;
5322 int smooth_interp_vectors = 1;
5326 int interp_refine = interp_refine_;
5328 HYPRE_BoomerAMGSetNodal(amg_precond, nodal);
5329 HYPRE_BoomerAMGSetNodalDiag(amg_precond, nodal_diag);
5330 HYPRE_BoomerAMGSetCycleRelaxType(amg_precond, relax_coarse, 3);
5331 HYPRE_BoomerAMGSetInterpVecVariant(amg_precond, interp_vec_variant);
5332 HYPRE_BoomerAMGSetInterpVecQMax(amg_precond, q_max);
5333 HYPRE_BoomerAMGSetSmoothInterpVectors(amg_precond, smooth_interp_vectors);
5334 HYPRE_BoomerAMGSetInterpRefine(amg_precond, interp_refine);
5337 HYPRE_BoomerAMGSetInterpVectors(amg_precond, rbms.Size(), rbms.GetData());
5346#if MFEM_HYPRE_VERSION >= 21800
5349 const std::string &prerelax,
5350 const std::string &postrelax)
5354 int interp_type = 100;
5355 int relax_type = 10;
5356 int coarsen_type = 6;
5357 real_t strength_tolC = 0.1;
5358 real_t strength_tolR = 0.01;
5359 real_t filter_tolR = 0.0;
5360 real_t filterA_tol = 0.0;
5363 int ns_down = 0, ns_up = 0, ns_coarse;
5366 ns_down = static_cast<int>(prerelax.length());
5367 ns_up = static_cast<int>(postrelax.length());
5371 HYPRE_Int **grid_relax_points = mfem_hypre_TAlloc(HYPRE_Int*, 4);
5372 grid_relax_points[0] = NULL;
5373 grid_relax_points[1] = mfem_hypre_TAlloc(HYPRE_Int, ns_down);
5374 grid_relax_points[2] = mfem_hypre_TAlloc(HYPRE_Int, ns_up);
5375 grid_relax_points[3] = mfem_hypre_TAlloc(HYPRE_Int, 1);
5376 grid_relax_points[3][0] = 0;
5379 for (int i = 0; i < ns_down; i++)
5381 if (prerelax[i] == 'F')
5383 grid_relax_points[1][i] = -1;
5385 else if (prerelax[i] == 'C')
5387 grid_relax_points[1][i] = 1;
5389 else if (prerelax[i] == 'A')
5391 grid_relax_points[1][i] = 0;
5396 for (int i = 0; i < ns_up; i++)
5398 if (postrelax[i] == 'F')
5400 grid_relax_points[2][i] = -1;
5402 else if (postrelax[i] == 'C')
5404 grid_relax_points[2][i] = 1;
5406 else if (postrelax[i] == 'A')
5408 grid_relax_points[2][i] = 0;
5412 HYPRE_BoomerAMGSetRestriction(amg_precond, distanceR);
5414 HYPRE_BoomerAMGSetGridRelaxPoints(amg_precond, grid_relax_points);
5416 HYPRE_BoomerAMGSetInterpType(amg_precond, interp_type);
5421 HYPRE_BoomerAMGSetSabs(amg_precond, Sabs);
5424 HYPRE_BoomerAMGSetCoarsenType(amg_precond, coarsen_type);
5427 HYPRE_BoomerAMGSetAggNumLevels(amg_precond, 0);
5429 HYPRE_BoomerAMGSetStrongThreshold(amg_precond, strength_tolC);
5433 HYPRE_BoomerAMGSetStrongThresholdR(amg_precond, strength_tolR);
5434 HYPRE_BoomerAMGSetFilterThresholdR(amg_precond, filter_tolR);
5437 if (relax_type > -1)
5439 HYPRE_BoomerAMGSetRelaxType(amg_precond, relax_type);
5444 HYPRE_BoomerAMGSetCycleNumSweeps(amg_precond, ns_coarse, 3);
5445 HYPRE_BoomerAMGSetCycleNumSweeps(amg_precond, ns_down, 1);
5446 HYPRE_BoomerAMGSetCycleNumSweeps(amg_precond, ns_up, 2);
5448 HYPRE_BoomerAMGSetADropTol(amg_precond, filterA_tol);
5450 HYPRE_BoomerAMGSetADropType(amg_precond, -1);
5458 for (int i = 0; i < rbms.Size(); i++)
5460 HYPRE_ParVectorDestroy(rbms[i]);
5463 HYPRE_BoomerAMGDestroy(amg_precond);
5489 MFEM_ASSERT(G != NULL, "");
5490 MFEM_ASSERT(x != NULL, "");
5491 MFEM_ASSERT(y != NULL, "");
5492 int sdim = (z == NULL) ? 2 : 3;
5493 int cycle_type = 13;
5494 MakeSolver(sdim, cycle_type);
5496 HYPRE_ParVector pz = z ? static_cast<HYPRE_ParVector>(*z) : NULL;
5497 HYPRE_AMSSetCoordinateVectors(ams, *x, *y, pz);
5498 HYPRE_AMSSetDiscreteGradient(ams, *G);
5506 int cycle_type = 13;
5511 trace_space = trace_space || rt_trace_space;
5517 "HypreAMS does not support variable order spaces");
5524 MakeSolver(std::max(sdim, vdim), cycle_type);
5525 MakeGradientAndInterpolation(edge_fespace, cycle_type);
5529 delete edge_fespace;
5534void HypreAMS::MakeSolver(int sdim, int cycle_type)
5540 int amg_coarsen_type = hypre_gpu ? 8 : 10;
5541 int amg_agg_levels = hypre_gpu ? 0 : 1;
5542 int amg_rlx_type = hypre_gpu ? 18 : 8;
5543 int rlx_type = hypre_gpu ? 1: 2;
5545 int amg_interp_type = 6;
5549 ams_cycle_type = cycle_type;
5550 HYPRE_AMSCreate(&ams);
5552 HYPRE_AMSSetDimension(ams, sdim);
5553 HYPRE_AMSSetTol(ams, 0.0);
5554 HYPRE_AMSSetMaxIter(ams, 1);
5555 HYPRE_AMSSetCycleType(ams, cycle_type);
5556 HYPRE_AMSSetPrintLevel(ams, 1);
5559 HYPRE_AMSSetSmoothingOptions(ams, rlx_type, rlx_sweeps, rlx_weight, rlx_omega);
5560 HYPRE_AMSSetAlphaAMGOptions(ams, amg_coarsen_type, amg_agg_levels, amg_rlx_type,
5561 theta, amg_interp_type, amg_Pmax);
5562 HYPRE_AMSSetBetaAMGOptions(ams, amg_coarsen_type, amg_agg_levels, amg_rlx_type,
5563 theta, amg_interp_type, amg_Pmax);
5565 HYPRE_AMSSetAlphaAMGCoarseRelaxType(ams, amg_rlx_type);
5566 HYPRE_AMSSetBetaAMGCoarseRelaxType(ams, amg_rlx_type);
5575void HypreAMS::MakeGradientAndInterpolation(
5576 ParFiniteElementSpace *edge_fespace, int cycle_type)
5578 const FiniteElementCollection *edge_fec = edge_fespace->FEColl();
5579 bool trace_space = dynamic_cast<const ND_Trace_FECollection *>(edge_fec);
5581 ParMesh *pmesh = edge_fespace->GetParMesh();
5583 int sdim = pmesh->SpaceDimension();
5584 int vdim = edge_fespace->FEColl()->GetRangeDim(dim - trace_space);
5587 MFEM_VERIFY(!edge_fespace->IsVariableOrder(),
5588 "HypreAMS does not support variable order spaces");
5589 int p = edge_fec->GetOrder() + (dim - trace_space == 1 ? 1 : 0);
5592 FiniteElementCollection *vert_fec;
5595 vert_fec = new H1_Trace_FECollection(p, dim);
5599 vert_fec = new H1_FECollection(p, dim);
5601 ParFiniteElementSpace *vert_fespace = new ParFiniteElementSpace(pmesh,
5605 ParDiscreteLinearOperator *grad;
5606 grad = new ParDiscreteLinearOperator(vert_fespace, edge_fespace);
5609 grad->AddTraceFaceInterpolator(new GradientInterpolator);
5613 grad->AddDomainInterpolator(new GradientInterpolator);
5617 G = grad->ParallelAssemble();
5618 HYPRE_AMSSetDiscreteGradient(ams, *G);
5623 Pi = Pix = Piy = Piz = NULL;
5624 if (p == 1 && pmesh->GetNodes() == NULL && vdim <= sdim)
5626 ParGridFunction x_coord(vert_fespace);
5627 ParGridFunction y_coord(vert_fespace);
5628 ParGridFunction z_coord(vert_fespace);
5630 for (int i = 0; i < pmesh->GetNV(); i++)
5632 coord = pmesh -> GetVertex(i);
5633 x_coord(i) = coord[0];
5634 if (sdim >= 2) { y_coord(i) = coord[1]; }
5635 if (sdim == 3) { z_coord(i) = coord[2]; }
5637 x = x_coord.ParallelProject();
5644 y = y_coord.ParallelProject();
5649 z = z_coord.ParallelProject();
5653 HYPRE_AMSSetCoordinateVectors(ams,
5654 x ? (HYPRE_ParVector)(*x) : NULL,
5655 y ? (HYPRE_ParVector)(*y) : NULL,
5656 z ? (HYPRE_ParVector)(*z) : NULL);
5660 ParFiniteElementSpace *vert_fespace_d =
5661 new ParFiniteElementSpace(pmesh, vert_fec, std::max(sdim, vdim),
5664 ParDiscreteLinearOperator *id_ND;
5665 id_ND = new ParDiscreteLinearOperator(vert_fespace_d, edge_fespace);
5668 id_ND->AddTraceFaceInterpolator(new IdentityInterpolator);
5672 id_ND->AddDomainInterpolator(new IdentityInterpolator);
5677 if (cycle_type < 10)
5679 Pi = id_ND->ParallelAssemble();
5683 Array2D<HypreParMatrix *> Pi_blocks;
5684 id_ND->GetParBlocks(Pi_blocks);
5685 Pix = Pi_blocks(0,0);
5686 if (std::max(sdim, vdim) >= 2) { Piy = Pi_blocks(0,1); }
5687 if (std::max(sdim, vdim) == 3) { Piz = Pi_blocks(0,2); }
5692 HYPRE_ParCSRMatrix HY_Pi = (Pi) ? (HYPRE_ParCSRMatrix) *Pi : NULL;
5693 HYPRE_ParCSRMatrix HY_Pix = (Pix) ? (HYPRE_ParCSRMatrix) *Pix : NULL;
5694 HYPRE_ParCSRMatrix HY_Piy = (Piy) ? (HYPRE_ParCSRMatrix) *Piy : NULL;
5695 HYPRE_ParCSRMatrix HY_Piz = (Piz) ? (HYPRE_ParCSRMatrix) *Piz : NULL;
5696 HYPRE_AMSSetInterpolations(ams, HY_Pi, HY_Pix, HY_Piy, HY_Piz);
5698 delete vert_fespace_d;
5701 delete vert_fespace;
5705void HypreAMS::ResetAMSPrecond()
5707#if MFEM_HYPRE_VERSION >= 22600
5709 auto *ams_data = (hypre_AMSData *)ams;
5712 HYPRE_Int dim = hypre_AMSDataDimension(ams_data);
5715 hypre_ParCSRMatrix *hy_G = hypre_AMSDataDiscreteGradient(ams_data);
5717 HYPRE_Int beta_is_zero = hypre_AMSDataBetaIsZero(ams_data);
5720 hypre_ParCSRMatrix *hy_Pi = hypre_AMSDataPiInterpolation(ams_data);
5721 hypre_ParCSRMatrix *hy_Pix = ams_data->Pix;
5722 hypre_ParCSRMatrix *hy_Piy = ams_data->Piy;
5723 hypre_ParCSRMatrix *hy_Piz = ams_data->Piz;
5724 HYPRE_Int owns_Pi = hypre_AMSDataOwnsPiInterpolation(ams_data);
5727 ams_data->owns_Pi = 0;
5731 hypre_ParVector *hy_x = hypre_AMSDataVertexCoordinateX(ams_data);
5732 hypre_ParVector *hy_y = hypre_AMSDataVertexCoordinateY(ams_data);
5733 hypre_ParVector *hy_z = hypre_AMSDataVertexCoordinateZ(ams_data);
5736 HYPRE_Int maxit = hypre_AMSDataMaxIter(ams_data);
5737 HYPRE_Real tol = hypre_AMSDataTol(ams_data);
5738 HYPRE_Int cycle_type = hypre_AMSDataCycleType(ams_data);
5739 HYPRE_Int ams_print_level = hypre_AMSDataPrintLevel(ams_data);
5742 HYPRE_Int A_relax_type = hypre_AMSDataARelaxType(ams_data);
5743 HYPRE_Int A_relax_times = hypre_AMSDataARelaxTimes(ams_data);
5744 HYPRE_Real A_relax_weight = hypre_AMSDataARelaxWeight(ams_data);
5745 HYPRE_Real A_omega = hypre_AMSDataAOmega(ams_data);
5746 HYPRE_Int A_cheby_order = hypre_AMSDataAChebyOrder(ams_data);
5747 HYPRE_Real A_cheby_fraction = hypre_AMSDataAChebyFraction(ams_data);
5749 HYPRE_Int B_Pi_coarsen_type = hypre_AMSDataPoissonAlphaAMGCoarsenType(ams_data);
5750 HYPRE_Int B_Pi_agg_levels = hypre_AMSDataPoissonAlphaAMGAggLevels(ams_data);
5751 HYPRE_Int B_Pi_relax_type = hypre_AMSDataPoissonAlphaAMGRelaxType(ams_data);
5752 HYPRE_Int B_Pi_coarse_relax_type = ams_data->B_Pi_coarse_relax_type;
5753 HYPRE_Real B_Pi_theta = hypre_AMSDataPoissonAlphaAMGStrengthThreshold(ams_data);
5754 HYPRE_Int B_Pi_interp_type = ams_data->B_Pi_interp_type;
5755 HYPRE_Int B_Pi_Pmax = ams_data->B_Pi_Pmax;
5757 HYPRE_Int B_G_coarsen_type = hypre_AMSDataPoissonBetaAMGCoarsenType(ams_data);
5758 HYPRE_Int B_G_agg_levels = hypre_AMSDataPoissonBetaAMGAggLevels(ams_data);
5759 HYPRE_Int B_G_relax_type = hypre_AMSDataPoissonBetaAMGRelaxType(ams_data);
5760 HYPRE_Int B_G_coarse_relax_type = ams_data->B_G_coarse_relax_type;
5761 HYPRE_Real B_G_theta = hypre_AMSDataPoissonBetaAMGStrengthThreshold(ams_data);
5762 HYPRE_Int B_G_interp_type = ams_data->B_G_interp_type;
5763 HYPRE_Int B_G_Pmax = ams_data->B_G_Pmax;
5765 HYPRE_AMSDestroy(ams);
5766 HYPRE_AMSCreate(&ams);
5767 ams_data = (hypre_AMSData *)ams;
5769 HYPRE_AMSSetDimension(ams, dim);
5770 HYPRE_AMSSetTol(ams, tol);
5771 HYPRE_AMSSetMaxIter(ams, maxit);
5772 HYPRE_AMSSetCycleType(ams, cycle_type);
5773 HYPRE_AMSSetPrintLevel(ams, ams_print_level);
5775 HYPRE_AMSSetCoordinateVectors(ams, hy_x, hy_y, hy_z);
5777 HYPRE_AMSSetDiscreteGradient(ams, hy_G);
5778 HYPRE_AMSSetCoordinateVectors(ams, hy_x, hy_y, hy_z);
5779 HYPRE_AMSSetInterpolations(ams, hy_Pi, hy_Pix, hy_Piy, hy_Piz);
5780 ams_data->owns_Pi = owns_Pi;
5783 HYPRE_AMSSetSmoothingOptions(ams, A_relax_type, A_relax_times, A_relax_weight,
5786 hypre_AMSDataAChebyOrder(ams_data) = A_cheby_order;
5787 hypre_AMSDataAChebyFraction(ams_data) = A_cheby_fraction;
5789 HYPRE_AMSSetAlphaAMGOptions(ams, B_Pi_coarsen_type, B_Pi_agg_levels,
5791 B_Pi_theta, B_Pi_interp_type, B_Pi_Pmax);
5792 HYPRE_AMSSetBetaAMGOptions(ams, B_G_coarsen_type, B_G_agg_levels,
5794 B_G_theta, B_G_interp_type, B_G_Pmax);
5796 HYPRE_AMSSetAlphaAMGCoarseRelaxType(ams, B_Pi_coarse_relax_type);
5797 HYPRE_AMSSetBetaAMGCoarseRelaxType(ams, B_G_coarse_relax_type);
5799 ams_data->beta_is_zero = beta_is_zero;
5802 HYPRE_AMSDestroy(ams);
5804 MakeSolver(space_dim, ams_cycle_type);
5806 HYPRE_AMSSetPrintLevel(ams, print_level);
5807 if (singular) { HYPRE_AMSSetBetaPoissonMatrix(ams, NULL); }
5809 HYPRE_AMSSetDiscreteGradient(ams, *G);
5812 HYPRE_AMSSetCoordinateVectors(ams,
5813 x ? (HYPRE_ParVector)(*x) : nullptr,
5814 y ? (HYPRE_ParVector)(*y) : nullptr,
5815 z ? (HYPRE_ParVector)(*z) : nullptr);
5819 HYPRE_AMSSetInterpolations(ams,
5820 Pi ? (HYPRE_ParCSRMatrix) *Pi : nullptr,
5821 Pix ? (HYPRE_ParCSRMatrix) *Pix : nullptr,
5822 Piy ? (HYPRE_ParCSRMatrix) *Piy : nullptr,
5823 Piz ? (HYPRE_ParCSRMatrix) *Piz : nullptr);
5831 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
5833 if (A) { ResetAMSPrecond(); }
5850 HYPRE_AMSDestroy(ams);
5865 HYPRE_AMSSetPrintLevel(ams, print_lvl);
5866 print_level = print_lvl;
5884 x(x_), y(y_), z(z_),
5886 ND_Pi(NULL), ND_Pix(NULL), ND_Piy(NULL), ND_Piz(NULL),
5887 RT_Pi(NULL), RT_Pix(NULL), RT_Piy(NULL), RT_Piz(NULL)
5889 MFEM_ASSERT(C != NULL, "");
5890 MFEM_ASSERT(G != NULL, "");
5891 MFEM_ASSERT(x != NULL, "");
5892 MFEM_ASSERT(y != NULL, "");
5893 MFEM_ASSERT(z != NULL, "");
5897 HYPRE_ADSSetCoordinateVectors(ads, *x, *y, *z);
5898 HYPRE_ADSSetDiscreteCurl(ads, *C);
5899 HYPRE_ADSSetDiscreteGradient(ads, *G);
5902void HypreADS::MakeSolver()
5908 int rlx_type = hypre_gpu ? 1 : 2;
5909 int amg_coarsen_type = hypre_gpu ? 8 : 10;
5910 int amg_agg_levels = hypre_gpu ? 0 : 1;
5911 int amg_rlx_type = hypre_gpu ? 18 : 8;
5913 int amg_interp_type = 6;
5916 HYPRE_ADSCreate(&ads);
5918 HYPRE_ADSSetTol(ads, 0.0);
5919 HYPRE_ADSSetMaxIter(ads, 1);
5920 HYPRE_ADSSetCycleType(ads, cycle_type);
5921 HYPRE_ADSSetPrintLevel(ads, 1);
5924 HYPRE_ADSSetSmoothingOptions(ads, rlx_type, rlx_sweeps, rlx_weight, rlx_omega);
5925 HYPRE_ADSSetAMGOptions(ads, amg_coarsen_type, amg_agg_levels, amg_rlx_type,
5926 theta, amg_interp_type, amg_Pmax);
5927 HYPRE_ADSSetAMSOptions(ads, ams_cycle_type, amg_coarsen_type, amg_agg_levels,
5928 amg_rlx_type, theta, amg_interp_type, amg_Pmax);
5937void HypreADS::MakeDiscreteMatrices(ParFiniteElementSpace *face_fespace)
5939 const FiniteElementCollection *face_fec = face_fespace->FEColl();
5941 (dynamic_cast<const RT_Trace_FECollection*>(face_fec) != NULL);
5943 MFEM_VERIFY(!face_fespace->IsVariableOrder(), "");
5944 int p = face_fec->GetOrder();
5947 ParMesh *pmesh = (ParMesh *) face_fespace->GetMesh();
5948 FiniteElementCollection *vert_fec, *edge_fec;
5951 vert_fec = new H1_Trace_FECollection(p, 3);
5952 edge_fec = new ND_Trace_FECollection(p, 3);
5956 vert_fec = new H1_FECollection(p, 3);
5957 edge_fec = new ND_FECollection(p, 3);
5960 ParFiniteElementSpace *vert_fespace = new ParFiniteElementSpace(pmesh,
5962 ParFiniteElementSpace *edge_fespace = new ParFiniteElementSpace(pmesh,
5966 if (p == 1 && pmesh->GetNodes() == NULL)
5968 ParGridFunction x_coord(vert_fespace);
5969 ParGridFunction y_coord(vert_fespace);
5970 ParGridFunction z_coord(vert_fespace);
5972 for (int i = 0; i < pmesh->GetNV(); i++)
5974 coord = pmesh -> GetVertex(i);
5975 x_coord(i) = coord[0];
5976 y_coord(i) = coord[1];
5977 z_coord(i) = coord[2];
5979 x = x_coord.ParallelProject();
5980 y = y_coord.ParallelProject();
5981 z = z_coord.ParallelProject();
5985 HYPRE_ADSSetCoordinateVectors(ads, *x, *y, *z);
5995 ParDiscreteLinearOperator *curl;
5996 curl = new ParDiscreteLinearOperator(edge_fespace, face_fespace);
5999 curl->AddTraceFaceInterpolator(new CurlInterpolator);
6003 curl->AddDomainInterpolator(new CurlInterpolator);
6007 C = curl->ParallelAssemble();
6009 HYPRE_ADSSetDiscreteCurl(ads, *C);
6013 ParDiscreteLinearOperator *grad;
6014 grad = new ParDiscreteLinearOperator(vert_fespace, edge_fespace);
6017 grad->AddTraceFaceInterpolator(new GradientInterpolator);
6021 grad->AddDomainInterpolator(new GradientInterpolator);
6025 G = grad->ParallelAssemble();
6028 HYPRE_ADSSetDiscreteGradient(ads, *G);
6032 RT_Pi = RT_Pix = RT_Piy = RT_Piz = NULL;
6033 ND_Pi = ND_Pix = ND_Piy = ND_Piz = NULL;
6034 if (p > 1 || pmesh->GetNodes() != NULL)
6036 ParFiniteElementSpace *vert_fespace_d
6039 ParDiscreteLinearOperator *id_ND;
6040 id_ND = new ParDiscreteLinearOperator(vert_fespace_d, edge_fespace);
6043 id_ND->AddTraceFaceInterpolator(new IdentityInterpolator);
6047 id_ND->AddDomainInterpolator(new IdentityInterpolator);
6052 if (ams_cycle_type < 10)
6054 ND_Pi = id_ND->ParallelAssemble();
6060 Array2D<HypreParMatrix *> ND_Pi_blocks;
6061 id_ND->GetParBlocks(ND_Pi_blocks);
6062 ND_Pix = ND_Pi_blocks(0,0);
6063 ND_Piy = ND_Pi_blocks(0,1);
6064 ND_Piz = ND_Pi_blocks(0,2);
6069 ParDiscreteLinearOperator *id_RT;
6070 id_RT = new ParDiscreteLinearOperator(vert_fespace_d, face_fespace);
6073 id_RT->AddTraceFaceInterpolator(new NormalInterpolator);
6077 id_RT->AddDomainInterpolator(new IdentityInterpolator);
6082 if (cycle_type < 10)
6084 RT_Pi = id_RT->ParallelAssemble();
6089 Array2D<HypreParMatrix *> RT_Pi_blocks;
6090 id_RT->GetParBlocks(RT_Pi_blocks);
6091 RT_Pix = RT_Pi_blocks(0,0);
6092 RT_Piy = RT_Pi_blocks(0,1);
6093 RT_Piz = RT_Pi_blocks(0,2);
6098 HYPRE_ParCSRMatrix HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz;
6099 HY_RT_Pi = (RT_Pi) ? (HYPRE_ParCSRMatrix) *RT_Pi : NULL;
6100 HY_RT_Pix = (RT_Pix) ? (HYPRE_ParCSRMatrix) *RT_Pix : NULL;
6101 HY_RT_Piy = (RT_Piy) ? (HYPRE_ParCSRMatrix) *RT_Piy : NULL;
6102 HY_RT_Piz = (RT_Piz) ? (HYPRE_ParCSRMatrix) *RT_Piz : NULL;
6103 HYPRE_ParCSRMatrix HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz;
6104 HY_ND_Pi = (ND_Pi) ? (HYPRE_ParCSRMatrix) *ND_Pi : NULL;
6105 HY_ND_Pix = (ND_Pix) ? (HYPRE_ParCSRMatrix) *ND_Pix : NULL;
6106 HY_ND_Piy = (ND_Piy) ? (HYPRE_ParCSRMatrix) *ND_Piy : NULL;
6107 HY_ND_Piz = (ND_Piz) ? (HYPRE_ParCSRMatrix) *ND_Piz : NULL;
6108 HYPRE_ADSSetInterpolations(ads,
6109 HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz,
6110 HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz);
6112 delete vert_fespace_d;
6116 delete vert_fespace;
6118 delete edge_fespace;
6121void HypreADS::Init(ParFiniteElementSpace *face_fespace)
6124 MakeDiscreteMatrices(face_fespace);
6127void HypreADS::ResetADSPrecond()
6129 HYPRE_ADSDestroy(ads);
6133 HYPRE_ADSSetPrintLevel(ads, print_level);
6135 HYPRE_ADSSetDiscreteCurl(ads, *C);
6136 HYPRE_ADSSetDiscreteGradient(ads, *G);
6139 MFEM_VERIFY(x && y && z, "");
6140 HYPRE_ADSSetCoordinateVectors(ads, *x, *y, *z);
6144 HYPRE_ParCSRMatrix HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz;
6145 HY_RT_Pi = (RT_Pi) ? (HYPRE_ParCSRMatrix) *RT_Pi : NULL;
6146 HY_RT_Pix = (RT_Pix) ? (HYPRE_ParCSRMatrix) *RT_Pix : NULL;
6147 HY_RT_Piy = (RT_Piy) ? (HYPRE_ParCSRMatrix) *RT_Piy : NULL;
6148 HY_RT_Piz = (RT_Piz) ? (HYPRE_ParCSRMatrix) *RT_Piz : NULL;
6149 HYPRE_ParCSRMatrix HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz;
6150 HY_ND_Pi = (ND_Pi) ? (HYPRE_ParCSRMatrix) *ND_Pi : NULL;
6151 HY_ND_Pix = (ND_Pix) ? (HYPRE_ParCSRMatrix) *ND_Pix : NULL;
6152 HY_ND_Piy = (ND_Piy) ? (HYPRE_ParCSRMatrix) *ND_Piy : NULL;
6153 HY_ND_Piz = (ND_Piz) ? (HYPRE_ParCSRMatrix) *ND_Piz : NULL;
6154 HYPRE_ADSSetInterpolations(ads,
6155 HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz,
6156 HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz);
6163 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
6165 if (A) { ResetADSPrecond(); }
6182 HYPRE_ADSDestroy(ads);
6204 HYPRE_ADSSetPrintLevel(ads, print_lvl);
6205 print_level = print_lvl;
6208HypreLOBPCG::HypreMultiVector::HypreMultiVector(int n, HypreParVector & v,
6209 mv_InterfaceInterpreter & interpreter)
6213 mv_ptr = mv_MultiVectorCreateFromSampleVector(&interpreter, nv,
6214 (HYPRE_ParVector)v);
6216 HYPRE_ParVector* vecs = NULL;
6218 mv_TempMultiVector* tmp =
6219 (mv_TempMultiVector*)mv_MultiVectorGetData(mv_ptr);
6220 vecs = (HYPRE_ParVector*)(tmp -> vector);
6223 hpv = new HypreParVector*[nv];
6224 for (int i=0; i<nv; i++)
6226 hpv[i] = new HypreParVector(vecs[i]);
6230HypreLOBPCG::HypreMultiVector::~HypreMultiVector()
6234 for (int i=0; i<nv; i++)
6241 mv_MultiVectorDestroy(mv_ptr);
6245HypreLOBPCG::HypreMultiVector::Randomize(HYPRE_Int seed_)
6247 mv_MultiVectorSetRandom(mv_ptr, seed_);
6251HypreLOBPCG::HypreMultiVector::GetVector(unsigned int i)
6253 MFEM_ASSERT((int)i < nv, "index out of range");
6259HypreLOBPCG::HypreMultiVector::StealVectors()
6261 HypreParVector ** hpv_ret = hpv;
6265 mv_TempMultiVector * mv_tmp =
6266 (mv_TempMultiVector*)mv_MultiVectorGetData(mv_ptr);
6268 mv_tmp->ownsVectors = 0;
6270 for (int i=0; i<nv; i++)
6272 hpv_ret[i]->SetOwnership(1);
6290 MPI_Comm_size(comm,&numProcs);
6291 MPI_Comm_rank(comm,&myid);
6293 HYPRE_ParCSRSetupInterpreter(&interpreter);
6294 HYPRE_ParCSRSetupMatvec(&matvec_fn);
6295 HYPRE_LOBPCGCreate(&interpreter, &matvec_fn, &lobpcg_solver);
6304 HYPRE_LOBPCGDestroy(lobpcg_solver);
6310 HYPRE_LOBPCGSetTol(lobpcg_solver, tol);
6316#if MFEM_HYPRE_VERSION >= 21101
6317 HYPRE_LOBPCGSetRTol(lobpcg_solver, rel_tol);
6319 MFEM_ABORT("This method requires HYPRE version >= 2.11.1");
6326 HYPRE_LOBPCGSetMaxIter(lobpcg_solver, max_iter);
6334 HYPRE_LOBPCGSetPrintLevel(lobpcg_solver, logging);
6341 HYPRE_LOBPCGSetPrecondUsageMode(lobpcg_solver, pcg_mode);
6347 HYPRE_LOBPCGSetPrecond(lobpcg_solver,
6348 (HYPRE_PtrToSolverFcn)this->PrecondSolve,
6349 (HYPRE_PtrToSolverFcn)this->PrecondSetup,
6350 (HYPRE_Solver)&precond);
6358 if (HYPRE_AssumedPartitionCheck())
6362 MPI_Scan(&locSize, &part[1], 1, HYPRE_MPI_BIG_INT, MPI_SUM, comm);
6364 part[0] = part[1] - locSize;
6366 MPI_Allreduce(&locSize, &glbSize, 1, HYPRE_MPI_BIG_INT, MPI_SUM, comm);
6372 MPI_Allgather(&locSize, 1, HYPRE_MPI_BIG_INT,
6373 &part[1], 1, HYPRE_MPI_BIG_INT, comm);
6376 for (int i=0; i<numProcs; i++)
6378 part[i+1] += part[i];
6381 glbSize = part[numProcs];
6393 matvec_fn.MatvecCreate = this->OperatorMatvecCreate;
6394 matvec_fn.Matvec = this->OperatorMatvec;
6395 matvec_fn.MatvecDestroy = this->OperatorMatvecDestroy;
6397 HYPRE_LOBPCGSetup(lobpcg_solver,(HYPRE_Matrix)&A,NULL,NULL);
6403 matvec_fn.MatvecCreate = this->OperatorMatvecCreate;
6404 matvec_fn.Matvec = this->OperatorMatvec;
6405 matvec_fn.MatvecDestroy = this->OperatorMatvecDestroy;
6407 HYPRE_LOBPCGSetupB(lobpcg_solver,(HYPRE_Matrix)&M,NULL);
6416 for (int i=0; i<nev; i++)
6418 eigs[i] = eigenvalues[i];
6425 return multi_vec->GetVector(i);
6432 if ( multi_vec == NULL )
6434 MFEM_ASSERT(x != NULL, "In HypreLOBPCG::SetInitialVectors()");
6436 multi_vec = new HypreMultiVector(nev, *x, interpreter);
6440 for (int i=0; i < min(num_vecs,nev); i++)
6442 multi_vec->GetVector(i) = *vecs[i];
6446 for (int i=min(num_vecs,nev); i < nev; i++)
6448 multi_vec->GetVector(i).Randomize(seed);
6452 if ( subSpaceProj != NULL )
6455 y = multi_vec->GetVector(0);
6457 for (int i=1; i<nev; i++)
6459 subSpaceProj->Mult(multi_vec->GetVector(i),
6460 multi_vec->GetVector(i-1));
6462 subSpaceProj->Mult(y,
6463 multi_vec->GetVector(nev-1));
6471 if ( multi_vec == NULL )
6473 MFEM_ASSERT(x != NULL, "In HypreLOBPCG::Solve()");
6475 multi_vec = new HypreMultiVector(nev, *x, interpreter);
6476 multi_vec->Randomize(seed);
6478 if ( subSpaceProj != NULL )
6481 y = multi_vec->GetVector(0);
6483 for (int i=1; i<nev; i++)
6485 subSpaceProj->Mult(multi_vec->GetVector(i),
6486 multi_vec->GetVector(i-1));
6488 subSpaceProj->Mult(y, multi_vec->GetVector(nev-1));
6499 HYPRE_LOBPCGSolve(lobpcg_solver, NULL, *multi_vec, eigenvalues);
6503HypreLOBPCG::OperatorMatvecCreate( void *A,
6510 return ( matvec_data );
6514HypreLOBPCG::OperatorMatvec( void *matvec_data,
6515 HYPRE_Complex alpha,
6521 MFEM_VERIFY(alpha == 1.0 && beta == 0.0, "values not supported");
6523 Operator *Aop = (Operator*)A;
6525 hypre_ParVector * xPar = (hypre_ParVector *)x;
6526 hypre_ParVector * yPar = (hypre_ParVector *)y;
6528 HypreParVector xVec(xPar);
6529 HypreParVector yVec(yPar);
6531 Aop->Mult( xVec, yVec );
6535 yVec.HypreReadWrite();
6541HypreLOBPCG::OperatorMatvecDestroy( void *matvec_data )
6547HypreLOBPCG::PrecondSolve( void *solver,
6552 Solver *PC = (Solver*)solver;
6554 hypre_ParVector * bPar = (hypre_ParVector *)b;
6555 hypre_ParVector * xPar = (hypre_ParVector *)x;
6557 HypreParVector bVec(bPar);
6558 HypreParVector xVec(xPar);
6560 PC->Mult( bVec, xVec );
6564 xVec.HypreReadWrite();
6570HypreLOBPCG::PrecondSetup( void *solver,
6588 MPI_Comm_size(comm,&numProcs);
6589 MPI_Comm_rank(comm,&myid);
6591 HYPRE_AMECreate(&ame_solver);
6592 HYPRE_AMESetPrintLevel(ame_solver, 0);
6599 mfem_hypre_TFree_host(multi_vec);
6604 for (int i=0; i<nev; i++)
6606 delete eigenvectors[i];
6609 delete [] eigenvectors;
6613 mfem_hypre_TFree_host(eigenvalues);
6616 HYPRE_AMEDestroy(ame_solver);
6624 HYPRE_AMESetBlockSize(ame_solver, nev);
6630 HYPRE_AMESetTol(ame_solver, tol);
6636#if MFEM_HYPRE_VERSION >= 21101
6637 HYPRE_AMESetRTol(ame_solver, rel_tol);
6639 MFEM_ABORT("This method requires HYPRE version >= 2.11.1");
6646 HYPRE_AMESetMaxIter(ame_solver, max_iter);
6654 HYPRE_AMESetPrintLevel(ame_solver, logging);
6661 ams_precond = &precond;
6669 HYPRE_Solver ams_precond_ptr = (HYPRE_Solver)*ams_precond;
6671 ams_precond->SetupFcn()(*ams_precond,A,NULL,NULL);
6673 HYPRE_AMESetAMSSolver(ame_solver, ams_precond_ptr);
6676 HYPRE_AMESetup(ame_solver);
6682 HYPRE_ParCSRMatrix parcsr_M = M;
6683 HYPRE_AMESetMassMatrix(ame_solver,(HYPRE_ParCSRMatrix)parcsr_M);
6689 HYPRE_AMESolve(ame_solver);
6692 HYPRE_AMEGetEigenvalues(ame_solver,&eigenvalues);
6695 HYPRE_AMEGetEigenvectors(ame_solver,&multi_vec);
6702 eigs.SetSize(nev); eigs = -1.0;
6705 for (int i=0; i<nev; i++)
6707 eigs[i] = eigenvalues[i];
6712HypreAME::createDummyVectors() const
6715 for (int i=0; i<nev; i++)
6722const HypreParVector &
6725 if ( eigenvectors == NULL )
6727 this->createDummyVectors();
6730 return *eigenvectors[i];
6736 if ( eigenvectors == NULL )
6738 this->createDummyVectors();
6743 eigenvectors = NULL;
Dynamic 2D array using row-major layout.
Memory< T > & GetMemory()
Return a reference to the Memory object used by the Array.
const T * HostRead() const
Shortcut for mfem::Read(a.GetMemory(), a.Size(), false).
void Sort()
Sorts the array in ascending order. This requires operator< to be defined for T.
void SetSize(int nsize)
Change the logical size of the array, keep existing entries.
int Size() const
Return the logical size of the array.
T * GetData()
Returns the data.
static MemoryType GetHostMemoryType()
Get the current Host MemoryType. This is the MemoryType used by most MFEM classes when allocating mem...
static MemoryClass GetHostMemoryClass()
Get the current Host MemoryClass. This is the MemoryClass used by most MFEM host Memory objects.
static bool Allows(unsigned long b_mask)
Return true if any of the backends in the backend mask, b_mask, are allowed.
Collection of finite elements from the same family in multiple dimensions. This class is used to matc...
int GetOrder() const
Return the order (polynomial degree) of the FE collection, corresponding to the order/degree returned...
int GetRangeDim(int dim) const
bool IsVariableOrder() const
Returns true if the space contains elements of varying polynomial orders.
Ordering::Type GetOrdering() const
Return the ordering method.
const FiniteElementCollection * FEColl() const
Hash function for data sequences.
std::string GetHash() const
Return the hash string for the current sequence and reset (clear) the sequence.
HashFunction & AppendDoubles(const real_t *doubles, size_t num_doubles)
Add a sequence of doubles for hashing, given as a c-array.
HashFunction & AppendInts(const int_type *ints, size_t num_ints)
Add a sequence of integers for hashing, given as a c-array.
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
HypreADS(ParFiniteElementSpace *face_fespace)
void SetPrintLevel(int print_lvl)
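A minimal usage sketch for the ADS preconditioner follows; it assumes an already assembled HypreParMatrix A for an H(div) bilinear form, true-dof HypreParVectors B and X, and a ParFiniteElementSpace rt_fespace built on a Raviart-Thomas collection (these names are illustrative, not defined in this file):
   HypreADS ads(&rt_fespace);      // builds the discrete gradient/curl and interpolations from the RT space
   ads.SetPrintLevel(0);
   ads.SetOperator(A);             // A: HypreParMatrix of the H(div) problem
   HyprePCG pcg(A.GetComm());
   pcg.SetOperator(A);
   pcg.SetTol(1e-12);
   pcg.SetMaxIter(200);
   pcg.SetPreconditioner(ads);
   pcg.Mult(B, X);                 // B, X: HypreParVectors on the true dofs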
void SetNumModes(int num_eigs)
void SetPreconditioner(HypreSolver &precond)
void SetPrintLevel(int logging)
void SetMassMatrix(const HypreParMatrix &M)
const HypreParVector & GetEigenvector(unsigned int i) const
Extract a single eigenvector.
void GetEigenvalues(Array< real_t > &eigenvalues) const
Collect the converged eigenvalues.
void SetOperator(const HypreParMatrix &A)
void Solve()
Solve the eigenproblem.
void SetMaxIter(int max_iter)
HypreParVector ** StealEigenvectors()
Transfer ownership of the converged eigenvectors.
void SetRelTol(real_t rel_tol)
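The AME calling sequence below is a sketch modeled on MFEM's Maxwell eigenvalue examples; A and M stand for the assembled curl-curl and mass HypreParMatrix objects, nd_fespace for a Nedelec ParFiniteElementSpace, and nev for the requested number of modes, none of which are defined in this file:
   HypreAMS ams(&nd_fespace);
   ams.SetOperator(A);
   ams.SetPrintLevel(0);
   ams.SetSingularProblem();        // the curl-curl operator has a large null space
   HypreAME ame(MPI_COMM_WORLD);
   ame.SetNumModes(nev);
   ame.SetPreconditioner(ams);
   ame.SetMaxIter(100);
   ame.SetTol(1e-8);
   ame.SetPrintLevel(1);
   ame.SetMassMatrix(M);
   ame.SetOperator(A);
   ame.Solve();
   Array<real_t> eigenvalues;
   ame.GetEigenvalues(eigenvalues);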
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
HypreAMS(ParFiniteElementSpace *edge_fespace)
Construct the AMS solver on the given edge finite element space.
void SetPrintLevel(int print_lvl)
void SetSystemsOptions(int dim, bool order_bynodes=false)
void SetElasticityOptions(ParFiniteElementSpace *fespace, bool interp_refine=true)
void SetAdvectiveOptions(int distance=15, const std::string &prerelax="", const std::string &postrelax="FFC")
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
virtual ~HypreBoomerAMG()
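For vector-valued (elasticity-like) systems, BoomerAMG is normally told about the block structure before being used as a preconditioner. A sketch, assuming A is the assembled system HypreParMatrix, dim the number of unknowns per node, and fespace the displacement ParFiniteElementSpace:
   HypreBoomerAMG amg(A);
   amg.SetSystemsOptions(dim, true);     // 'true' assumes a byNODES ordering of the dofs
   amg.SetPrintLevel(0);
   // When the discretization space is available, the specialized variant can be used instead:
   // amg.SetElasticityOptions(&fespace);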
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
HypreEuclid(MPI_Comm comm)
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
void SetRowScale(int row_scale)
void SetMaxIter(int max_iter)
HypreFGMRES(MPI_Comm comm)
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
void SetPreconditioner(HypreSolver &precond)
Set the hypre solver to be used as a preconditioner.
void SetPrintLevel(int print_lvl)
void Mult(const HypreParVector &b, HypreParVector &x) const override
Solve Ax=b with hypre's FGMRES.
void SetLogging(int logging)
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
void SetLogging(int logging)
void SetPreconditioner(HypreSolver &precond)
Set the hypre solver to be used as a preconditioner.
void SetMaxIter(int max_iter)
void SetPrintLevel(int print_lvl)
void Mult(const HypreParVector &b, HypreParVector &x) const override
Solve Ax=b with hypre's GMRES.
HypreGMRES(MPI_Comm comm)
void SetAbsTol(real_t tol)
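A GMRES sketch for a nonsymmetric system, using Euclid (parallel ILU) as the preconditioner; A, B and X are the same assumed matrix and vectors as in the sketches above:
   HypreEuclid euclid(A.GetComm());
   euclid.SetOperator(A);
   HypreGMRES gmres(A.GetComm());
   gmres.SetOperator(A);
   gmres.SetKDim(50);                // restart length
   gmres.SetTol(1e-10);
   gmres.SetMaxIter(300);
   gmres.SetPrintLevel(1);
   gmres.SetPreconditioner(euclid);
   gmres.Mult(B, X);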
HypreILU()
Constructor; sets the default options.
void SetType(HYPRE_Int ilu_type)
void SetLocalReordering(HYPRE_Int reorder_type)
void SetMaxIter(HYPRE_Int max_iter)
void SetLevelOfFill(HYPRE_Int lev_fill)
Set the fill level for ILU(k); the default is k=1.
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
void SetTol(HYPRE_Real tol)
void SetPrintLevel(HYPRE_Int print_level)
Set the print level: 0 = none, 1 = setup, 2 = solve, 3 = setup+solve.
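A short HypreILU configuration sketch (available only when MFEM_HYPRE_VERSION >= 21900); A is an assumed HypreParMatrix, and the resulting object is normally handed to a Krylov wrapper rather than used on its own:
   HypreILU ilu;
   ilu.SetLevelOfFill(1);            // ILU(1)
   ilu.SetPrintLevel(0);
   ilu.SetOperator(A);
   // e.g. gmres.SetPreconditioner(ilu);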
void SetMassMatrix(Operator &M)
HypreLOBPCG(MPI_Comm comm)
void SetPrintLevel(int logging)
void SetPreconditioner(Solver &precond)
void GetEigenvalues(Array< real_t > &eigenvalues) const
Collect the converged eigenvalues.
void SetOperator(Operator &A)
void Solve()
Solve the eigenproblem.
void SetPrecondUsageMode(int pcg_mode)
void SetInitialVectors(int num_vecs, HypreParVector **vecs)
void SetMaxIter(int max_iter)
void SetRelTol(real_t rel_tol)
const HypreParVector & GetEigenvector(unsigned int i) const
Extract a single eigenvector.
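A LOBPCG sketch patterned after MFEM's Laplace eigenvalue example; A and M denote the assumed stiffness and mass HypreParMatrix objects and nev the number of requested modes:
   HypreBoomerAMG amg(A);
   amg.SetPrintLevel(0);
   HypreLOBPCG lobpcg(MPI_COMM_WORLD);
   lobpcg.SetNumModes(nev);
   lobpcg.SetPreconditioner(amg);
   lobpcg.SetMaxIter(200);
   lobpcg.SetTol(1e-8);
   lobpcg.SetPrecondUsageMode(1);
   lobpcg.SetPrintLevel(1);
   lobpcg.SetMassMatrix(M);
   lobpcg.SetOperator(A);
   lobpcg.Solve();
   Array<real_t> eigenvalues;
   lobpcg.GetEigenvalues(eigenvalues);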
void Mult(const HypreParVector &b, HypreParVector &x) const override
Solve Ax=b with hypre's PCG.
void SetResidualConvergenceOptions(int res_frequency=-1, real_t rtol=0.0)
void SetPrintLevel(int print_lvl)
void SetLogging(int logging)
void SetPreconditioner(HypreSolver &precond)
Set the hypre solver to be used as a preconditioner.
void SetMaxIter(int max_iter)
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
void SetAbsTol(real_t atol)
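A PCG sketch for an SPD system, including the optional periodic recomputation of the true residual; amg stands for any HypreSolver used as a preconditioner, and A, B, X are the usual assumed matrix and vectors:
   HyprePCG pcg(A);
   pcg.SetTol(1e-12);
   pcg.SetMaxIter(200);
   pcg.SetPrintLevel(2);
   pcg.SetPreconditioner(amg);
   pcg.SetResidualConvergenceOptions(10);   // recompute the residual every 10 iterations
   pcg.Mult(B, X);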
Wrapper for hypre's ParCSR matrix class.
void HypreReadWrite()
Update the internal hypre_ParCSRMatrix object, A, to be in hypre memory space.
void operator*=(real_t s)
Scale all entries by s: A_scaled = s*A.
signed char OwnsColMap() const
Get colmap ownership flag.
HYPRE_BigInt N() const
Returns the global number of columns.
void ScaleRows(const Vector &s)
Scale the local row i by s(i).
void AbsMultTranspose(real_t a, const Vector &x, real_t b, Vector &y) const
Computes y = a * |At| * x + b * y, using entry-wise absolute values of the transpose of the matrix A.
void HostReadWrite()
Update the internal hypre_ParCSRMatrix object, A, to be on host.
void Print(const std::string &fname, HYPRE_Int offi=0, HYPRE_Int offj=0) const
Prints the locally owned rows in parallel. The resulting files can be read with Read_IJMatrix().
void DropSmallEntries(real_t tol)
Wrapper for hypre_ParCSRMatrixDropSmallEntries in different versions of hypre. Drop off-diagonal entr...
void PrintCommPkg(std::ostream &out=mfem::out) const
Print information about the hypre_ParCSRCommPkg of the HypreParMatrix.
void GetDiag(Vector &diag) const
Get the local diagonal of the matrix.
HYPRE_Int MultTranspose(HypreParVector &x, HypreParVector &y, real_t alpha=1.0, real_t beta=0.0) const
Computes y = alpha * A^t * x + beta * y.
void Threshold(real_t threshold=0.0)
Remove values smaller in absolute value than some threshold.
Memory< HYPRE_Int > & GetDiagMemoryI()
HypreParMatrix * LeftDiagMult(const SparseMatrix &D, HYPRE_BigInt *row_starts=NULL) const
Multiply the HypreParMatrix on the left by a block-diagonal parallel matrix D and return the result a...
void EliminateRows(const Array< int > &rows)
Eliminate rows from the diagonal and off-diagonal blocks of the matrix.
void AbsMult(real_t a, const Vector &x, real_t b, Vector &y) const
Computes y = a * |A| * x + b * y, using entry-wise absolute values of the matrix A.
int GetNumCols() const
Returns the number of columns in the diagonal block of the ParCSRMatrix.
void SetOwnerFlags(signed char diag, signed char offd, signed char colmap)
Explicitly set the three ownership flags, see docs for diagOwner etc.
void InvScaleRows(const Vector &s)
Scale the local row i by 1./s(i)
void ResetTranspose() const
Reset (destroy) the internal transpose matrix that is created by EnsureMultTranspose() and MultTransp...
void WrapHypreParCSRMatrix(hypre_ParCSRMatrix *a, bool owner=true)
Converts hypre's format to HypreParMatrix.
int GetNumRows() const
Returns the number of rows in the diagonal block of the ParCSRMatrix.
Memory< HYPRE_Int > & GetDiagMemoryJ()
HYPRE_BigInt * GetRowStarts() const
Return the parallel row partitioning array.
void HypreRead() const
Update the internal hypre_ParCSRMatrix object, A, to be in hypre memory space.
HypreParMatrix()
An empty matrix to be used as a reference to an existing matrix.
signed char OwnsOffd() const
Get offd ownership flag.
HYPRE_BigInt GetGlobalNumRows() const
Return the global number of rows.
void PrintHash(std::ostream &out) const
Print sizes and hashes for all data arrays of the HypreParMatrix from the local MPI rank.
void HostWrite()
Update the internal hypre_ParCSRMatrix object, A, to be on host.
HYPRE_BigInt GetGlobalNumCols() const
Return the global number of columns.
void HostRead() const
Update the internal hypre_ParCSRMatrix object, A, to be on host.
void EliminateRowsCols(const Array< int > &rows_cols, const HypreParVector &X, HypreParVector &B)
HYPRE_Int Mult(HypreParVector &x, HypreParVector &y, real_t alpha=1.0, real_t beta=0.0) const
Computes y = alpha * A * x + beta * y.
HypreParMatrix * ExtractSubmatrix(const Array< int > &indices, real_t threshold=0.0) const
void EnsureMultTranspose() const
Ensure the action of the transpose is performed fast.
MPI_Comm GetComm() const
MPI communicator.
Memory< real_t > & GetDiagMemoryData()
HYPRE_BigInt * GetColStarts() const
Return the parallel column partitioning array.
void Read_IJMatrix(MPI_Comm comm, const std::string &fname)
Read a matrix saved as a HYPRE_IJMatrix.
HypreParMatrix & Add(const real_t beta, const HypreParMatrix &B)
void GetBlocks(Array2D< HypreParMatrix * > &blocks, bool interleaved_rows=false, bool interleaved_cols=false) const
void EliminateBC(const HypreParMatrix &Ae, const Array< int > &ess_dof_list, const Vector &X, Vector &B) const
Eliminate essential BC specified by ess_dof_list from the solution X to the r.h.s....
void MakeRef(const HypreParMatrix &master)
Make this HypreParMatrix a reference to 'master'.
real_t FNorm() const
Return the Frobenius norm of the matrix (or 0 if the underlying hypre matrix is NULL)
void GetOffd(SparseMatrix &offd, HYPRE_BigInt *&cmap) const
Get the local off-diagonal block. NOTE: 'offd' will not own any data.
HYPRE_BigInt M() const
Returns the global number of rows.
hypre_ParCSRMatrix * StealData()
Changes the ownership of the matrix.
HypreParMatrix * Transpose() const
Returns the transpose of *this.
void MergeDiagAndOffd(SparseMatrix &merged)
Get a single SparseMatrix containing all rows from this processor, merged from the diagonal and off-d...
HypreParMatrix * EliminateCols(const Array< int > &cols)
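A few of the HypreParMatrix operations listed above combined into one sketch; A is an assumed assembled matrix and x, y compatible HypreParVectors:
   Vector diag;
   A.GetDiag(diag);                    // local diagonal entries
   HypreParMatrix *At = A.Transpose(); // explicit parallel transpose
   At->Mult(x, y);                     // y = A^T x
   delete At;
   A.Print("A_mat");                   // one file per rank; Read_IJMatrix() can load them back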
Wrapper for hypre's parallel vector class.
void WrapMemoryWrite(Memory< real_t > &mem)
Replace the HypreParVector's data with the given Memory, mem, and prepare the vector for write access...
void HypreRead() const
Prepare the HypreParVector for read access in hypre's device memory space, HYPRE_MEMORY_DEVICE.
void Read(MPI_Comm comm, const std::string &fname)
Reads a HypreParVector from files saved with HypreParVector::Print.
HypreParVector CreateCompatibleVector() const
Constructs a HypreParVector compatible with the calling vector.
void Print(const std::string &fname) const
Prints the locally owned rows in parallel.
void WrapMemoryReadWrite(Memory< real_t > &mem)
Replace the HypreParVector's data with the given Memory, mem, and prepare the vector for read and wri...
~HypreParVector()
Calls hypre's destroy function.
void WrapHypreParVector(hypre_ParVector *y, bool owner=true)
Converts hypre's format to HypreParVector.
HYPRE_Int Randomize(HYPRE_Int seed)
Set random values.
void HypreReadWrite()
Prepare the HypreParVector for read and write access in hypre's device memory space,...
void HypreWrite()
Prepare the HypreParVector for write access in hypre's device memory space, HYPRE_MEMORY_DEVICE.
HypreParVector()
Default constructor, no underlying hypre_ParVector is created.
void WrapMemoryRead(const Memory< real_t > &mem)
Replace the HypreParVector's data with the given Memory, mem, and prepare the vector for read access ...
HypreParVector & operator=(real_t d)
Set constant values.
void SetData(real_t *data_)
Sets the data of the Vector and the hypre_ParVector to data_.
Vector * GlobalVector() const
Returns the global vector in each processor.
void SetOwnership(int own)
Sets ownership of the internal hypre_ParVector.
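A HypreParVector sketch using the methods above; it assumes a HypreParMatrix A whose row partitioning the vectors follow (the matrix-based constructor is an assumption of this sketch):
   HypreParVector X(A);                             // partitioned like the rows of A
   X.Randomize(1);                                  // deterministic pseudo-random entries
   HypreParVector Y = X.CreateCompatibleVector();   // same layout as X
   A.Mult(X, Y);                                    // Y = A*X
   Vector *g = Y.GlobalVector();                    // gathered global copy on every rank
   delete g;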
void SetReuse(int reuse)
Set the pattern reuse parameter.
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
void SetSymmetry(int sym)
Set symmetry parameter.
void SetParams(real_t thresh, int nlevels)
Set the threshold and levels parameters.
void SetLoadBal(real_t loadbal)
Set the load balance parameter.
HypreParaSails(MPI_Comm comm)
void SetFilter(real_t filter)
Set the filter parameter.
void SetLogging(int logging)
Set the logging parameter.
virtual ~HypreParaSails()
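A ParaSails configuration sketch; the threshold and level values mirror the defaults set in this file, and A is the assumed system matrix:
   HypreParaSails sai(A.GetComm());
   sai.SetOperator(A);
   sai.SetParams(0.1, 1);   // drop threshold and number of levels
   sai.SetSymmetry(1);      // assume an SPD problem
   // sai can now be passed to, e.g., HyprePCG::SetPreconditioner().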
int eig_est_cg_iter
Number of CG iterations to determine eigenvalue estimates.
int poly_order
Order of the smoothing polynomial.
void SetPolyOptions(int poly_order, real_t poly_fraction, int eig_est_cg_iter=10)
Set parameters for polynomial smoothing.
void SetFIRCoefficients(real_t max_eig)
Compute window and Chebyshev coefficients for given polynomial order.
void SetWindowParameters(real_t a, real_t b, real_t c)
Set parameters for windowing function for FIR smoother.
real_t poly_fraction
Fraction of spectrum to smooth for polynomial relaxation.
void MultTranspose(const Vector &b, Vector &x) const override
Apply transpose of the smoother to relax the linear system Ax=b.
HypreParVector * V
Temporary vectors.
HypreParMatrix * A
The linear system matrix.
real_t * fir_coeffs
Combined coefficients for windowing and Chebyshev polynomials.
int relax_times
Number of relaxation sweeps.
void SetWindowByName(const char *window_name)
Convenience function for setting canonical windowing parameters.
virtual void Mult(const HypreParVector &b, HypreParVector &x) const
Relax the linear system Ax=b.
real_t * l1_norms
l1 norms of the rows of A
void SetOperator(const Operator &op) override
real_t max_eig_est
Maximal eigenvalue estimate for polynomial smoothing.
void SetTaubinOptions(real_t lambda, real_t mu, int iter)
Set parameters for Taubin's lambda-mu method.
bool pos_l1_norms
If set, take absolute values of the computed l1_norms.
real_t window_params[3]
Parameters for windowing function of FIR filter.
real_t omega
SOR parameter (usually in (0,2))
int poly_scale
Apply the polynomial smoother to A or D^{-1/2} A D^{-1/2}.
HypreParVector * B
Right-hand side and solution vectors.
real_t relax_weight
Damping coefficient (usually <= 1)
real_t min_eig_est
Minimal eigenvalue estimate for polynomial smoothing.
void SetSOROptions(real_t relax_weight, real_t omega)
Set SOR-related parameters.
void SetType(HypreSmoother::Type type, int relax_times=1)
Set the relaxation type and number of sweeps.
Type
HYPRE smoother types.
real_t lambda
Taubin's lambda-mu method parameters.
Memory< real_t > auxB
Auxiliary buffers for the case when the input or output arrays in methods like Mult(const Vector &, Vector &) need to be deep copied in order to be used by hypre.
bool A_is_symmetric
A flag that indicates whether the linear system matrix A is symmetric.
HypreParVector * X0
FIR Filter Temporary Vectors.
static Type DefaultType()
Default value for the smoother type used by the constructors: Type::l1GS when HYPRE is running on CPU and Type::l1Jacobi when HYPRE is running on GPU.
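These options are usually set on a HypreSmoother before it is applied as a relaxation step, for example inside a multigrid cycle or on its own. A short sketch under the assumption that A, b, and x already exist (the type, sweep count, and polynomial settings are illustrative):

   // Illustrative sketch: A is an assembled HypreParMatrix, b and x compatible vectors.
   HypreSmoother smoother;
   smoother.SetType(HypreSmoother::Chebyshev, 2);  // smoother type and number of sweeps
   smoother.SetPolyOptions(3, 0.3);                // order 3, smooth 30% of the spectrum
   smoother.SetOperator(A);
   smoother.Mult(b, x);                            // relax A x = b, x updated in place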
Abstract class for hypre's solvers and preconditioners.
const HypreParMatrix * A
The linear system matrix.
int setup_called
Was hypre's Setup function called already?
@ WARN_HYPRE_ERRORS
Issue warnings on hypre errors.
@ IGNORE_HYPRE_ERRORS
Ignore hypre errors (see e.g. HypreADS)
@ ABORT_HYPRE_ERRORS
Abort on hypre errors (default in base class)
virtual HYPRE_PtrToParSolverFcn SolveFcn() const =0
hypre's internal Solve function
HypreParVector * B
Right-hand side and solution vector.
bool WrapVectors(const Vector &b, Vector &x) const
Makes the internal HypreParVectors B and X wrap the input vectors b and x.
virtual void Mult(const HypreParVector &b, HypreParVector &x) const
Solve the linear system Ax=b.
ErrorMode error_mode
How to treat hypre errors.
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
virtual void Setup(const HypreParVector &b, HypreParVector &x) const
Set up the solver (if not set up already, also called automatically by HypreSolver::Mult).
virtual HYPRE_PtrToParSolverFcn SetupFcn() const =0
hypre's internal Setup function
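HypreSolver itself is abstract; concrete wrappers supply SetupFcn() and SolveFcn(). The sketch below shows the shared usage pattern and error handling through one such wrapper, HypreBoomerAMG, which is defined elsewhere in MFEM; A, b, and x are assumed to exist.

   // Illustrative sketch: BoomerAMG is one concrete HypreSolver.
   HypreBoomerAMG amg(A);
   amg.SetPrintLevel(0);
   // Treat hypre errors as warnings instead of aborting (default: ABORT_HYPRE_ERRORS).
   amg.SetErrorMode(HypreSolver::WARN_HYPRE_ERRORS);
   amg.Mult(b, x);   // calls hypre's Setup on the first application, then Solve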
static void InitDevice()
Configure HYPRE's compute and memory policy.
static void Init()
Initialize hypre by calling HYPRE_Init() and set default options. After calling Hypre::Init(), hypre will be finalized automatically at program exit.
static bool configure_runtime_policy_from_mfem
Use MFEM's device policy to configure HYPRE's device policy, true by default. This variable is used by Hypre::InitDevice().
static void Finalize()
Finalize hypre (called automatically at program exit if Hypre::Init() has been called).
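One possible initialization order in a parallel MFEM program is sketched below; Mpi::Init and Device are MFEM classes defined elsewhere, the "cuda" string is only an example configuration, and the explicit Hypre::InitDevice() call assumes a GPU build where the device policy should follow MFEM's.

   int main(int argc, char *argv[])
   {
      Mpi::Init(argc, argv);   // initialize MPI (MFEM wrapper)
      Hypre::Init();           // HYPRE_Init() + default options; finalized at exit
      Device device("cuda");   // optional: configure MFEM's device backend
      Hypre::InitDevice();     // align hypre's memory/execution policy with MFEM's
      // ... assemble and solve ...
      return 0;                // Hypre::Finalize() runs automatically at program exit
   }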
A class to initialize the size of a Tensor.
static MemoryType GetDualMemoryType(MemoryType mt)
Return the dual MemoryType of the given one, mt.
Class used by MFEM to store pointers to host and/or device memory.
void SetHostPtrOwner(bool own) const
Set/clear the ownership flag for the host pointer. Ownership indicates whether the pointer will be deleted by the method Delete().
T * Write(MemoryClass mc, int size)
Get write-only access to the memory with the given MemoryClass.
MemoryType GetHostMemoryType() const
Return the host MemoryType of the Memory object.
void SetDevicePtrOwner(bool own) const
Set/clear the ownership flag for the device pointer. Ownership indicates whether the pointer will be deleted by the method Delete().
int Capacity() const
Return the size of the allocated memory.
MemoryType GetDeviceMemoryType() const
Return the device MemoryType of the Memory object. If the device MemoryType is not set,...
T * ReadWrite(MemoryClass mc, int size)
Get read-write access to the memory with the given MemoryClass.
void MakeAlias(const Memory &base, int offset, int size)
Create a memory object that points inside the memory object base.
bool OwnsHostPtr() const
Return true if the host pointer is owned. Ownership indicates whether the pointer will be deleted by the method Delete().
bool Empty() const
Return true if the Memory object is empty, see Reset().
const T * Read(MemoryClass mc, int size) const
Get read-only access to the memory with the given MemoryClass.
void CopyFrom(const Memory &src, int size)
Copy size entries from src to *this.
void Reset()
Reset the memory to be empty, ensuring that Delete() will be a no-op.
void Wrap(T *ptr, int size, bool own)
Wrap an externally allocated host pointer, ptr, with the current host memory type returned by MemoryManager::GetHostMemoryType().
void Delete()
Delete the owned pointers and reset the Memory object.
void ClearOwnerFlags() const
Clear the ownership flags for the host and device pointers, as well as any internal data allocated by the Memory object.
void New(int size)
Allocate host memory for size entries with the current host memory type returned by MemoryManager::GetHostMemoryType().
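A small sketch of the Memory API described above, assuming mfem.hpp is included; the size n and the use of GetHypreMemoryClass() for the read access are only illustrative.

   // Illustrative sketch of host allocation, access, and cleanup.
   const int n = 16;
   Memory<real_t> m;
   m.New(n);                                       // allocate with the current host MemoryType
   real_t *h = m.Write(MemoryClass::HOST, n);      // write-only host access
   for (int i = 0; i < n; i++) { h[i] = 0.0; }
   const real_t *p = m.Read(GetHypreMemoryClass(), n); // read access where hypre expects data
   (void) p;
   m.Delete();                                     // free the owned pointer(s)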
int Dimension() const
Dimension of the reference space used within the elements.
int SpaceDimension() const
Dimension of the physical space containing the mesh.
Arbitrary order H(curl)-trace finite elements defined on the interface between mesh elements (faces,...
int width
Dimension of the input / number of columns in the matrix.
int Height() const
Get the height (size of output) of the Operator. Synonym with NumRows().
int height
Dimension of the output / number of rows in the matrix.
virtual void Mult(const Vector &x, Vector &y) const =0
Operator application: y=A(x).
int NumCols() const
Get the number of columns (size of input) of the Operator. Synonym with Width().
DiagonalPolicy
Defines operator diagonal policy upon elimination of rows and/or columns.
@ DIAG_ONE
Set the diagonal value to one.
@ DIAG_ZERO
Set the diagonal value to zero.
int Width() const
Get the width (size of input) of the Operator. Synonym with NumCols().
int NumRows() const
Get the number of rows (size of output) of the Operator. Synonym with Height().
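As a minimal illustration of the Operator interface above, the sketch below defines a diagonal-scaling operator; ScaleOperator is a made-up name, not an MFEM class, and only the height/width bookkeeping and Mult() are required.

   // Illustrative sketch: y is assumed to be pre-sized to Height(), as is conventional.
   class ScaleOperator : public Operator
   {
      real_t a;
   public:
      ScaleOperator(int n, real_t a_) : Operator(n), a(a_) { } // square: height = width = n
      void Mult(const Vector &x, Vector &y) const override
      {
         for (int i = 0; i < Height(); i++) { y(i) = a * x(i); }
      }
   };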
Abstract parallel finite element space.
HYPRE_BigInt * GetTrueDofOffsets() const
HYPRE_BigInt GlobalTrueVSize() const
HypreParVector * NewTrueDofVector()
ParMesh * GetParMesh() const
Class for parallel meshes.
Arbitrary order "H^{-1/2}-conforming" face finite elements defined on the interface between mesh elem...
bool iterative_mode
If true, use the second argument of Mult() as an initial guess.
int NumNonZeroElems() const override
Returns the number of the nonzero elements in the matrix.
Memory< int > & GetMemoryI()
void Swap(SparseMatrix &other)
void Clear()
Clear the contents of the SparseMatrix.
Memory< int > & GetMemoryJ()
Memory< real_t > & GetMemoryData()
int Size() const
Returns the number of TYPE I elements.
int Size_of_connections() const
virtual const real_t * HostRead() const
Shortcut for mfem::Read(vec.GetMemory(), vec.Size(), false).
void SetDataAndSize(real_t *d, int s)
Set the Vector data and size.
real_t Normlinf() const
Returns the l_infinity norm of the vector.
void MakeDataOwner() const
Set the Vector data (host pointer) ownership flag.
real_t Norml1() const
Returns the l_1 norm of the vector.
Memory< real_t > & GetMemory()
Return a reference to the Memory object used by the Vector.
real_t Norml2() const
Returns the l2 norm of the vector.
void Destroy()
Destroy a vector.
int Size() const
Returns the size of the vector.
void SetSize(int s)
Resize the vector to size s.
virtual real_t * HostWrite()
Shortcut for mfem::Write(vec.GetMemory(), vec.Size(), false).
Vector & operator=(const real_t *v)
Copy Size() entries from v.
virtual real_t * HostReadWrite()
Shortcut for mfem::ReadWrite(vec.GetMemory(), vec.Size(), false).
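A short sketch of the Vector members listed above (sizes and values are placeholders, and mfem.hpp is assumed to be included):

   // Illustrative sketch of basic Vector usage.
   Vector v(5);
   v = 1.0;                                   // set all entries to a constant
   v.SetSize(8);                              // resize; new entries are uninitialized
   real_t *hv = v.HostWrite();                // host pointer for writing
   for (int i = 0; i < v.Size(); i++) { hv[i] = i; }
   mfem::out << "||v||_1 = " << v.Norml1()
             << ", ||v||_2 = " << v.Norml2()
             << ", ||v||_inf = " << v.Normlinf() << "\n";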
real_t sigma(const Vector &x)
MemoryType GetHypreMemoryType()
The MemoryType used by MFEM when allocating arrays for Hypre objects.
void CopyMemory(Memory< T > &src, Memory< T > &dst, MemoryClass dst_mc, bool dst_owner)
Shallow or deep copy src to dst with the goal to make the array src accessible through dst with the MemoryClass dst_mc.
bool CanShallowCopy(const Memory< T > &src, MemoryClass mc)
Return true if the src Memory can be used with the MemoryClass mc.
const T * Read(const Memory< T > &mem, int size, bool on_dev=true)
Get a pointer for read access to mem with the mfem::Device's DeviceMemoryClass, if on_dev = true, or the mfem::Device's HostMemoryClass, otherwise.
MemoryClass GetHypreForallMemoryClass()
real_t infinity()
Define a shortcut for std::numeric_limits<double>::infinity()
real_t ParNormlp(const Vector &vec, real_t p, MPI_Comm comm)
Compute the l_p norm of the Vector which is split without overlap across the given communicator.
void CopyConvertMemory(Memory< SrcT > &src, MemoryClass dst_mc, Memory< DstT > &dst)
Deep copy and convert src to dst with the goal to make the array src accessible through dst with the MemoryClass dst_mc and convert it from type SrcT to type DstT.
real_t u(const Vector &xvec)
void mfem_error(const char *msg)
void Mult(const Table &A, const Table &B, Table &C)
C = A * B (as boolean matrices)
decltype(hypre_CSRMatrix::memory_location) GetHypreParMatrixMemoryLocation(MemoryClass mc)
const T * HostRead(const Memory< T > &mem, int size)
Shortcut to Read(const Memory<T> &mem, int size, false)
T * Write(Memory< T > &mem, int size, bool on_dev=true)
Get a pointer for write access to mem with the mfem::Device's DeviceMemoryClass, if on_dev = true,...
OutStream out(std::cout)
Global stream used by the library for standard output. Initially it uses the same std::streambuf as s...
real_t InnerProduct(HypreParVector *x, HypreParVector *y)
MemoryClass
Memory classes identify sets of memory types.
MemoryClass GetHypreMemoryClass()
The MemoryClass used by Hypre objects.
void RAP(const DenseMatrix &A, const DenseMatrix &P, DenseMatrix &RAP)
MemoryType GetMemoryType(MemoryClass mc)
Return a suitable MemoryType for a given MemoryClass.
int ParCSRRelax_FIR(hypre_ParCSRMatrix *A, hypre_ParVector *f, real_t max_eig, int poly_order, real_t *fir_coeffs, hypre_ParVector *u, hypre_ParVector *x0, hypre_ParVector *x1, hypre_ParVector *x2, hypre_ParVector *x3)
void delete_hypre_ParCSRMatrixColMapOffd(hypre_ParCSRMatrix *A)
HypreParMatrix * HypreParMatrixFromBlocks(Array2D< const HypreParMatrix * > &blocks, Array2D< real_t > *blockCoeff)
Returns a merged hypre matrix constructed from hypre matrix blocks.
HypreParMatrix * ParAdd(const HypreParMatrix *A, const HypreParMatrix *B)
Returns the matrix A + B.
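For the parallel matrix helpers, a hedged sketch (A and B are assumed to be dimensionally compatible HypreParMatrix objects; the caller deletes the returned matrices):

   // Illustrative sketch: A and B assumed to exist.
   HypreParMatrix *C = ParMult(&A, &B);      // C = A * B
   HypreParMatrix *S = ParAdd(&A, &B);       // S = A + B
   // ... use C and S ...
   delete C;
   delete S;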
T * HostWrite(Memory< T > &mem, int size)
Shortcut to Write(const Memory<T> &mem, int size, false)
int ParCSRRelax_Taubin(hypre_ParCSRMatrix *A, hypre_ParVector *f, real_t lambda, real_t mu, int N, real_t max_eig, hypre_ParVector *u, hypre_ParVector *r)
void HypreStealOwnership(HypreParMatrix &A_hyp, SparseMatrix &A_diag)
Make A_hyp steal ownership of its diagonal part A_diag.
void GatherBlockOffsetData(MPI_Comm comm, const int rank, const int nprocs, const int num_loc, const Array< int > &offsets, std::vector< int > &all_num_loc, const int numBlocks, std::vector< std::vector< HYPRE_BigInt > > &blockProcOffsets, std::vector< HYPRE_BigInt > &procOffsets, std::vector< std::vector< int > > &procBlockOffsets, HYPRE_BigInt &firstLocal, HYPRE_BigInt &globalNum)
HypreParMatrix * ParMult(const HypreParMatrix *A, const HypreParMatrix *B, bool own_matrix)
void hypre_forall(int N, lambda &&body)
bool HypreUsingGPU()
Return true if HYPRE is configured to use GPU.
void BlockInverseScale(const HypreParMatrix *A, HypreParMatrix *C, const Vector *b, HypreParVector *d, int blocksize, BlockInverseScaleJob job)
MemoryType
Memory types supported by MFEM.
@ HOST
Host memory; using new[] and delete[].
void EliminateBC(const HypreParMatrix &A, const HypreParMatrix &Ae, const Array< int > &ess_dof_list, const Vector &X, Vector &B)
Eliminate essential BC specified by ess_dof_list from the solution X to the r.h.s. B.
HYPRE_MemoryLocation GetHypreMemoryLocation()
Return the configured HYPRE_MemoryLocation.
std::function< real_t(const Vector &)> f(real_t mass_coeff)
bool MemoryClassContainsType(MemoryClass mc, MemoryType mt)
Return true iff the MemoryType mt is contained in the MemoryClass mc.
void forall(int N, lambda &&body)
void Add(const DenseMatrix &A, const DenseMatrix &B, real_t alpha, DenseMatrix &C)
C = A + alpha*B.
real_t p(const Vector &x, real_t t)
@ DEBUG_DEVICE
[device] Debug backend: host memory is READ/WRITE protected while a device is in use....
@ DEVICE_MASK
Bitwise-OR of all device backends.
Helper struct to convert a C++ type to an MPI type.