MFEM  v4.6.0
Finite element discretization library
hypre.cpp
Go to the documentation of this file.
1 // Copyright (c) 2010-2023, Lawrence Livermore National Security, LLC. Produced
2 // at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3 // LICENSE and NOTICE for details. LLNL-CODE-806117.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability visit https://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the BSD-3 license. We welcome feedback and contributions, see file
10 // CONTRIBUTING.md for details.
11 
12 #include "../config/config.hpp"
13 
14 #ifdef MFEM_USE_MPI
15 
16 #include "linalg.hpp"
17 #include "../fem/fem.hpp"
18 #include "../general/forall.hpp"
19 
20 #include <fstream>
21 #include <iomanip>
22 #include <cmath>
23 #include <cstdlib>
24 
25 using namespace std;
26 
27 namespace mfem
28 {
29 
Hypre::Hypre()
{
#if MFEM_HYPRE_VERSION >= 21900
   // Initialize the hypre library (hypre >= 2.19.0 requires HYPRE_Init()
   // before any other hypre call).
   HYPRE_Init();
#endif

   // Global hypre options that we set by default
   SetDefaultOptions();
}
40 
41 void Hypre::Finalize()
42 {
43  Hypre &hypre = Instance();
44  if (!hypre.finalized)
45  {
46 #if MFEM_HYPRE_VERSION >= 21900
47  HYPRE_Finalize();
48 #endif
49  hypre.finalized = true;
50  }
51 }
52 
void Hypre::SetDefaultOptions()
{
   // Global hypre options, see
   // https://hypre.readthedocs.io/en/latest/solvers-boomeramg.html#gpu-supported-options

#if MFEM_HYPRE_VERSION >= 22100
#ifdef HYPRE_USING_CUDA
   // Use hypre's SpGEMM instead of cuSPARSE for performance reasons
   HYPRE_SetSpGemmUseCusparse(0);
#elif defined(HYPRE_USING_HIP)
   // Use rocSPARSE instead of hypre's SpGEMM for performance reasons (default)
   // HYPRE_SetSpGemmUseCusparse(1);
#endif
#endif

   // The following options are hypre's defaults as of hypre-2.24

   // Allocate hypre objects in GPU memory (default)
   // HYPRE_SetMemoryLocation(HYPRE_MEMORY_DEVICE);

   // Where to execute when using UVM (default)
   // HYPRE_SetExecutionPolicy(HYPRE_EXEC_DEVICE);

   // Use GPU-based random number generator (default)
   // HYPRE_SetUseGpuRand(1);

   // The following options are to be used with UMPIRE memory pools

   // Set Umpire names for device and UVM memory pools. If names are set by
   // calling these functions, hypre doesn't own the pool and just uses it. If
   // these functions are not called, hypre will allocate and own the pool
   // (provided it is configured with --with-umpire).
   // HYPRE_SetUmpireDevicePoolName("HYPRE_DEVICE_POOL");
   // HYPRE_SetUmpireUMPoolName("HYPRE_UVM_POOL");
}
88 
89 
90 template<typename TargetT, typename SourceT>
91 static TargetT *DuplicateAs(const SourceT *array, int size,
92  bool cplusplus = true)
93 {
94  TargetT *target_array = cplusplus ? (TargetT*) Memory<TargetT>(size)
95  /* */ : mfem_hypre_TAlloc_host(TargetT, size);
96  for (int i = 0; i < size; i++)
97  {
98  target_array[i] = array[i];
99  }
100  return target_array;
101 }
102 
103 
104 /// Return true if the @a src Memory can be used with the MemoryClass @a mc.
105 /** If this function returns true then src.{Read,Write,ReadWrite} can be called
106  safely with the MemoryClass @a mc. */
107 template <typename T>
109 {
110  MemoryType src_h_mt = src.GetHostMemoryType();
111  MemoryType src_d_mt = src.GetDeviceMemoryType();
112  if (src_d_mt == MemoryType::DEFAULT)
113  {
114  src_d_mt = MemoryManager::GetDualMemoryType(src_h_mt);
115  }
116  return (MemoryClassContainsType(mc, src_h_mt) ||
117  MemoryClassContainsType(mc, src_d_mt));
118 }
119 
120 
// Synchronize this Vector's data/size with the wrapped hypre_ParVector's
// local vector.
inline void HypreParVector::_SetDataAndSize_()
{
   hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
#if !defined(HYPRE_USING_GPU)
   // Host-only build: wrap hypre's local data array directly.
   SetDataAndSize(hypre_VectorData(x_loc),
                  internal::to_int(hypre_VectorSize(x_loc)));
#else
   // GPU build: wrap the data with the MemoryType matching where hypre placed
   // it (host or device).
   size = internal::to_int(hypre_VectorSize(x_loc));
   MemoryType mt = (hypre_VectorMemoryLocation(x_loc) == HYPRE_MEMORY_HOST
                    ? MemoryType::HOST : GetHypreMemoryType());
   if (hypre_VectorData(x_loc) != NULL)
   {
      // Non-owning wrap: hypre keeps ownership of the data array.
      data.Wrap(hypre_VectorData(x_loc), size, mt, false);
   }
   else
   {
      data.Reset();
   }
#endif
}
141 
// Create a vector of global size @a glob_size with row partitioning @a col;
// hypre allocates and owns the local data.
HypreParVector::HypreParVector(MPI_Comm comm, HYPRE_BigInt glob_size,
                               HYPRE_BigInt *col) : Vector()
{
   x = hypre_ParVectorCreate(comm,glob_size,col);
   hypre_ParVectorInitialize(x);
#if MFEM_HYPRE_VERSION <= 22200
   // Older hypre: make sure hypre does not free the 'col' partitioning array.
   hypre_ParVectorSetPartitioningOwner(x,0);
#endif
   // The data will be destroyed by hypre (this is the default)
   hypre_ParVectorSetDataOwner(x,1);
   hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(x),1);
   _SetDataAndSize_();
   own_ParVector = 1;
}
156 
158  double *data_, HYPRE_BigInt *col,
159  bool is_device_ptr)
160  : Vector()
161 {
162  x = hypre_ParVectorCreate(comm,glob_size,col);
163  hypre_ParVectorSetDataOwner(x,1); // owns the seq vector
164  hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
165  hypre_SeqVectorSetDataOwner(x_loc,0);
166 #if MFEM_HYPRE_VERSION <= 22200
167  hypre_ParVectorSetPartitioningOwner(x,0);
168 #endif
169  double tmp = 0.0;
170  hypre_VectorData(x_loc) = &tmp;
171 #ifdef HYPRE_USING_GPU
172  hypre_VectorMemoryLocation(x_loc) =
173  is_device_ptr ? HYPRE_MEMORY_DEVICE : HYPRE_MEMORY_HOST;
174 #else
175  (void)is_device_ptr;
176 #endif
177  // If hypre_ParVectorLocalVector(x) and &tmp are non-NULL,
178  // hypre_ParVectorInitialize(x) does not allocate memory!
179  hypre_ParVectorInitialize(x);
180  // Set the internal data array to the one passed in
181  hypre_VectorData(x_loc) = data_;
182  _SetDataAndSize_();
183  own_ParVector = 1;
184 }
185 
186 // Call the move constructor on the "compatible" temp vector
188  y.CreateCompatibleVector())
189 {
190  // Deep copy the local data
191  hypre_SeqVectorCopy(hypre_ParVectorLocalVector(y.x),
192  hypre_ParVectorLocalVector(x));
193 }
194 
196 {
197  own_ParVector = 0;
198  *this = std::move(y);
199 }
200 
202  int transpose) : Vector()
203 {
204  if (!transpose)
205  {
206  x = hypre_ParVectorInDomainOf(const_cast<HypreParMatrix&>(A));
207  }
208  else
209  {
210  x = hypre_ParVectorInRangeOf(const_cast<HypreParMatrix&>(A));
211  }
212  _SetDataAndSize_();
213  own_ParVector = 1;
214 }
215 
217 {
218  x = (hypre_ParVector *) y;
219  _SetDataAndSize_();
220  own_ParVector = 0;
221 }
222 
224 {
225  x = hypre_ParVectorCreate(pfes->GetComm(), pfes->GlobalTrueVSize(),
226  pfes->GetTrueDofOffsets());
227  hypre_ParVectorInitialize(x);
228 #if MFEM_HYPRE_VERSION <= 22200
229  hypre_ParVectorSetPartitioningOwner(x,0);
230 #endif
231  // The data will be destroyed by hypre (this is the default)
232  hypre_ParVectorSetDataOwner(x,1);
233  hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(x),1);
234  _SetDataAndSize_();
235  own_ParVector = 1;
236 }
237 
239 {
240  HypreParVector result;
241  result.x = hypre_ParVectorCreate(x -> comm, x -> global_size,
242  x -> partitioning);
243  hypre_ParVectorInitialize(result.x);
244 #if MFEM_HYPRE_VERSION <= 22200
245  hypre_ParVectorSetPartitioningOwner(result.x,0);
246 #endif
247  hypre_ParVectorSetDataOwner(result.x,1);
248  hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(result.x),1);
249  result._SetDataAndSize_();
250  result.own_ParVector = 1;
251 
252  return result;
253 }
254 
// Make this object a wrapper of the hypre vector @a y; when @a owner is true,
// @a y will be destroyed by this object's destructor.
void HypreParVector::WrapHypreParVector(hypre_ParVector *y, bool owner)
{
   // Release the currently wrapped hypre vector first, if we own it.
   if (own_ParVector) { hypre_ParVectorDestroy(x); }
   Destroy();
   x = y;
   _SetDataAndSize_();
   own_ParVector = owner;
}
263 
265 {
266  hypre_Vector *hv = hypre_ParVectorToVectorAll(*this);
267  Vector *v = new Vector(hv->data, internal::to_int(hv->size));
268  v->MakeDataOwner();
269  hypre_SeqVectorSetDataOwner(hv,0);
270  hypre_SeqVectorDestroy(hv);
271  return v;
272 }
273 
275 {
277  return *this;
278 }
279 
281 {
282 #ifdef MFEM_DEBUG
283  if (size != y.Size())
284  {
285  mfem_error("HypreParVector::operator=");
286  }
287 #endif
288 
290  return *this;
291 }
292 
294 {
295  Vector::operator=(std::move(y));
296  // Self-assignment-safe way to move for 'own_ParVector' and 'x':
297  const auto own_tmp = y.own_ParVector;
298  y.own_ParVector = 0;
299  own_ParVector = own_tmp;
300  const auto x_tmp = y.x;
301  y.x = nullptr;
302  x = x_tmp;
303  return *this;
304 }
305 
// Point both the wrapped hypre local vector and the base Vector at @a data_.
// The caller retains ownership of the array.
void HypreParVector::SetData(double *data_)
{
   hypre_VectorData(hypre_ParVectorLocalVector(x)) = data_;
   Vector::SetData(data_);
}
311 
313 {
314  hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
315  hypre_VectorData(x_loc) =
316  const_cast<double*>(data.Read(GetHypreMemoryClass(), size));
317 #ifdef HYPRE_USING_GPU
318  hypre_VectorMemoryLocation(x_loc) = HYPRE_MEMORY_DEVICE;
319 #endif
320 }
321 
323 {
324  hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
325  hypre_VectorData(x_loc) = data.ReadWrite(GetHypreMemoryClass(), size);
326 #ifdef HYPRE_USING_GPU
327  hypre_VectorMemoryLocation(x_loc) = HYPRE_MEMORY_DEVICE;
328 #endif
329 }
330 
332 {
333  hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
334  hypre_VectorData(x_loc) = data.Write(GetHypreMemoryClass(), size);
335 #ifdef HYPRE_USING_GPU
336  hypre_VectorMemoryLocation(x_loc) = HYPRE_MEMORY_DEVICE;
337 #endif
338 }
339 
341 {
342  MFEM_ASSERT(CanShallowCopy(mem, GetHypreMemoryClass()), "");
343  MFEM_ASSERT(mem.Capacity() >= size, "");
344 
345  data.Delete();
346  hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
347  hypre_VectorData(x_loc) =
348  const_cast<double*>(mem.Read(GetHypreMemoryClass(), size));
349 #ifdef HYPRE_USING_GPU
350  hypre_VectorMemoryLocation(x_loc) = HYPRE_MEMORY_DEVICE;
351 #endif
352  data.MakeAlias(mem, 0, size);
353 }
354 
356 {
357  MFEM_ASSERT(CanShallowCopy(mem, GetHypreMemoryClass()), "");
358  MFEM_ASSERT(mem.Capacity() >= size, "");
359 
360  data.Delete();
361  hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
362  hypre_VectorData(x_loc) = mem.ReadWrite(GetHypreMemoryClass(), size);
363 #ifdef HYPRE_USING_GPU
364  hypre_VectorMemoryLocation(x_loc) = HYPRE_MEMORY_DEVICE;
365 #endif
366  data.MakeAlias(mem, 0, size);
367 }
368 
370 {
371  MFEM_ASSERT(CanShallowCopy(mem, GetHypreMemoryClass()), "");
372  MFEM_ASSERT(mem.Capacity() >= size, "");
373 
374  data.Delete();
375  hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
376  hypre_VectorData(x_loc) = mem.Write(GetHypreMemoryClass(), size);
377 #ifdef HYPRE_USING_GPU
378  hypre_VectorMemoryLocation(x_loc) = HYPRE_MEMORY_DEVICE;
379 #endif
380  data.MakeAlias(mem, 0, size);
381 }
382 
// Fill the vector with values from hypre's random number generator seeded
// with @a seed; returns hypre's error code.
HYPRE_Int HypreParVector::Randomize(HYPRE_Int seed)
{
   return hypre_ParVectorSetRandomValues(x,seed);
}
387 
// Print the vector using hypre's parallel print routine with file name prefix
// @a fname.
void HypreParVector::Print(const char *fname) const
{
   hypre_ParVectorPrint(x,fname);
}
392 
// Replace this vector's contents with a hypre vector read from file(s) with
// prefix @a fname; this object takes ownership of the new hypre vector.
void HypreParVector::Read(MPI_Comm comm, const char *fname)
{
   // Drop the currently wrapped hypre vector (if owned) before reading.
   if (own_ParVector)
   {
      hypre_ParVectorDestroy(x);
   }
   data.Delete();
   x = hypre_ParVectorRead(comm, fname);
   own_ParVector = true;
   _SetDataAndSize_();
}
404 
406 {
407  if (own_ParVector)
408  {
409  hypre_ParVectorDestroy(x);
410  }
411 }
412 
413 
415 {
416  return hypre_ParVectorInnerProd(*x, *y);
417 }
418 
420 {
421  return hypre_ParVectorInnerProd(x, y);
422 }
423 
424 
425 double ParNormlp(const Vector &vec, double p, MPI_Comm comm)
426 {
427  double norm = 0.0;
428  if (p == 1.0)
429  {
430  double loc_norm = vec.Norml1();
431  MPI_Allreduce(&loc_norm, &norm, 1, MPI_DOUBLE, MPI_SUM, comm);
432  }
433  if (p == 2.0)
434  {
435  double loc_norm = vec*vec;
436  MPI_Allreduce(&loc_norm, &norm, 1, MPI_DOUBLE, MPI_SUM, comm);
437  norm = sqrt(norm);
438  }
439  if (p < infinity())
440  {
441  double sum = 0.0;
442  for (int i = 0; i < vec.Size(); i++)
443  {
444  sum += pow(fabs(vec(i)), p);
445  }
446  MPI_Allreduce(&sum, &norm, 1, MPI_DOUBLE, MPI_SUM, comm);
447  norm = pow(norm, 1.0/p);
448  }
449  else
450  {
451  double loc_norm = vec.Normlinf();
452  MPI_Allreduce(&loc_norm, &norm, 1, MPI_DOUBLE, MPI_MAX, comm);
453  }
454  return norm;
455 }
456 
457 /** @brief Shallow or deep copy @a src to @a dst with the goal to make the
458  array @a src accessible through @a dst with the MemoryClass @a dst_mc. If
459  one of the host/device MemoryType%s of @a src is contained in @a dst_mc,
460  then a shallow copy will be used and @a dst will simply be an alias of
461  @a src. Otherwise, @a dst will be properly allocated and @a src will be deep
462  copied to @a dst. */
463 /** If @a dst_owner is set to true and shallow copy is being used, then @a dst
464  will not be an alias of @a src; instead, @a src is copied to @a dst and all
465  ownership flags of @a src are reset.
466 
467  In both cases (deep or shallow copy), when @a dst is no longer needed,
468  dst.Delete() must be called to ensure all associated memory allocations are
469  freed.
470 
 471  The input contents of @a dst, if any, are not used and are overwritten by
 472  this function. In particular, @a dst should be empty or deleted before
 473  calling this function. */
template <typename T>
void CopyMemory(Memory<T> &src, Memory<T> &dst, MemoryClass dst_mc,
                bool dst_owner)
{
   if (CanShallowCopy(src, dst_mc))
   {
      // shallow copy
      if (!dst_owner)
      {
         // 'dst' becomes a non-owning alias of 'src'.
         src.Read(dst_mc, src.Capacity()); // Registers src if on host only
         dst.MakeAlias(src, 0, src.Capacity());
      }
      else
      {
         // Ownership transfer: 'dst' takes over the allocation and 'src' is
         // left as a non-owning view of the same data.
         dst = src;
         src.ClearOwnerFlags();
      }
   }
   else
   {
      // deep copy: allocate 'dst' in a type compatible with 'dst_mc'.
      dst.New(src.Capacity(), GetMemoryType(dst_mc));
      dst.CopyFrom(src, src.Capacity());
   }
}
499 
500 /** @brief Deep copy and convert @a src to @a dst with the goal to make the
501  array @a src accessible through @a dst with the MemoryClass @a dst_mc and
502  convert it from type SrcT to type DstT. */
503 /** When @a dst is no longer needed, dst.Delete() must be called to ensure all
504  associated memory allocations are freed.
505 
 506  The input contents of @a dst, if any, are not used and are overwritten by
 507  this function. In particular, @a dst should be empty or deleted before
 508  calling this function. */
509 template <typename SrcT, typename DstT>
511 {
512  auto capacity = src.Capacity();
513  dst.New(capacity, GetMemoryType(dst_mc));
514  // Perform the copy using the configured mfem Device
515  auto src_p = mfem::Read(src, capacity);
516  auto dst_p = mfem::Write(dst, capacity);
517  mfem::forall(capacity, [=] MFEM_HOST_DEVICE (int i) { dst_p[i] = src_p[i]; });
518 }
519 
520 
521 void HypreParMatrix::Init()
522 {
523  A = NULL;
524  X = Y = NULL;
525  auxX.Reset(); auxY.Reset();
526  diagOwner = offdOwner = colMapOwner = -1;
527  ParCSROwner = 1;
528  mem_diag.I.Reset();
529  mem_diag.J.Reset();
530  mem_diag.data.Reset();
531  mem_offd.I.Reset();
532  mem_offd.J.Reset();
533  mem_offd.data.Reset();
534 }
535 
// Update the wrapped hypre matrix so its diag/offd arrays point to read-only
// views in the MemoryClass @a mc.
void HypreParMatrix::Read(MemoryClass mc) const
{
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   const int num_rows = NumRows();
   const int diag_nnz = internal::to_int(diag->num_nonzeros);
   const int offd_nnz = internal::to_int(offd->num_nonzeros);
   // const_cast is needed because hypre's CSR struct has non-const members.
   diag->i = const_cast<HYPRE_Int*>(mem_diag.I.Read(mc, num_rows+1));
   diag->j = const_cast<HYPRE_Int*>(mem_diag.J.Read(mc, diag_nnz));
   diag->data = const_cast<double*>(mem_diag.data.Read(mc, diag_nnz));
   offd->i = const_cast<HYPRE_Int*>(mem_offd.I.Read(mc, num_rows+1));
   offd->j = const_cast<HYPRE_Int*>(mem_offd.J.Read(mc, offd_nnz));
   offd->data = const_cast<double*>(mem_offd.data.Read(mc, offd_nnz));
#if MFEM_HYPRE_VERSION >= 21800
   // Keep hypre's view of the arrays' location consistent with 'mc'.
   decltype(diag->memory_location) ml =
      (mc != GetHypreMemoryClass() ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE);
   diag->memory_location = ml;
   offd->memory_location = ml;
#endif
}
556 
// Update the wrapped hypre matrix so its diag/offd arrays point to
// read-write views in the MemoryClass @a mc.
void HypreParMatrix::ReadWrite(MemoryClass mc)
{
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   const int num_rows = NumRows();
   const int diag_nnz = internal::to_int(diag->num_nonzeros);
   const int offd_nnz = internal::to_int(offd->num_nonzeros);
   diag->i = mem_diag.I.ReadWrite(mc, num_rows+1);
   diag->j = mem_diag.J.ReadWrite(mc, diag_nnz);
   diag->data = mem_diag.data.ReadWrite(mc, diag_nnz);
   offd->i = mem_offd.I.ReadWrite(mc, num_rows+1);
   offd->j = mem_offd.J.ReadWrite(mc, offd_nnz);
   offd->data = mem_offd.data.ReadWrite(mc, offd_nnz);
#if MFEM_HYPRE_VERSION >= 21800
   // Keep hypre's view of the arrays' location consistent with 'mc'.
   decltype(diag->memory_location) ml =
      (mc != GetHypreMemoryClass() ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE);
   diag->memory_location = ml;
   offd->memory_location = ml;
#endif
}
577 
// Update the wrapped hypre matrix so its diag and/or offd arrays point to
// write-only views in the MemoryClass @a mc; @a set_diag / @a set_offd select
// which of the two blocks is updated.
void HypreParMatrix::Write(MemoryClass mc, bool set_diag, bool set_offd)
{
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   if (set_diag)
   {
      diag->i = mem_diag.I.Write(mc, mem_diag.I.Capacity());
      diag->j = mem_diag.J.Write(mc, mem_diag.J.Capacity());
      diag->data = mem_diag.data.Write(mc, mem_diag.data.Capacity());
   }
   if (set_offd)
   {
      offd->i = mem_offd.I.Write(mc, mem_offd.I.Capacity());
      offd->j = mem_offd.J.Write(mc, mem_offd.J.Capacity());
      offd->data = mem_offd.data.Write(mc, mem_offd.data.Capacity());
   }
#if MFEM_HYPRE_VERSION >= 21800
   // Keep hypre's view of the arrays' location consistent with 'mc'.
   decltype(diag->memory_location) ml =
      (mc != GetHypreMemoryClass() ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE);
   if (set_diag) { diag->memory_location = ml; }
   if (set_offd) { offd->memory_location = ml; }
#endif
}
601 
603 {
604  Init();
605  height = width = 0;
606 }
607 
608 void HypreParMatrix::WrapHypreParCSRMatrix(hypre_ParCSRMatrix *a, bool owner)
609 {
610  Destroy();
611  Init();
612  A = a;
613  ParCSROwner = owner;
614  height = GetNumRows();
615  width = GetNumCols();
616 #if MFEM_HYPRE_VERSION >= 21800
617  MemoryType diag_mt = (A->diag->memory_location == HYPRE_MEMORY_HOST
619  MemoryType offd_mt = (A->offd->memory_location == HYPRE_MEMORY_HOST
621 #else
622  const MemoryType diag_mt = MemoryType::HOST;
623  const MemoryType offd_mt = MemoryType::HOST;
624 #endif
625  diagOwner = HypreCsrToMem(A->diag, diag_mt, false, mem_diag);
626  offdOwner = HypreCsrToMem(A->offd, offd_mt, false, mem_offd);
627  HypreRead();
628 }
629 
// Copy (shallow when possible) the I/J/data arrays of @a csr into @a mem_csr
// and point @a hypre_csr at them. Returns the ownership flags encoded as:
// bit 0 -> mem_csr owns I/J, bit 1 -> mem_csr owns data.
signed char HypreParMatrix::CopyCSR(SparseMatrix *csr,
                                    MemoryIJData &mem_csr,
                                    hypre_CSRMatrix *hypre_csr,
                                    bool mem_owner)
{
   const MemoryClass hypre_mc = GetHypreMemoryClass();
#ifndef HYPRE_BIGINT
   // code for the case HYPRE_Int == int
   CopyMemory(csr->GetMemoryI(), mem_csr.I, hypre_mc, mem_owner);
   CopyMemory(csr->GetMemoryJ(), mem_csr.J, hypre_mc, mem_owner);
#else
   // code for the case HYPRE_Int == long long int: a deep, converting copy is
   // always required.
   CopyConvertMemory(csr->GetMemoryI(), hypre_mc, mem_csr.I);
   CopyConvertMemory(csr->GetMemoryJ(), hypre_mc, mem_csr.J);
#endif
   CopyMemory(csr->GetMemoryData(), mem_csr.data, hypre_mc, mem_owner);

   const int num_rows = csr->Height();
   const int nnz = csr->NumNonZeroElems();
   // Point the hypre CSR struct at the (possibly aliased) arrays.
   hypre_csr->i = const_cast<HYPRE_Int*>(mem_csr.I.Read(hypre_mc, num_rows+1));
   hypre_csr->j = const_cast<HYPRE_Int*>(mem_csr.J.Read(hypre_mc, nnz));
   hypre_csr->data = const_cast<double*>(mem_csr.data.Read(hypre_mc, nnz));

   MFEM_ASSERT(mem_csr.I.OwnsHostPtr() == mem_csr.J.OwnsHostPtr(),
               "invalid state: host ownership for I and J differ!");
   return (mem_csr.I.OwnsHostPtr() ? 1 : 0) +
          (mem_csr.data.OwnsHostPtr() ? 2 : 0);
}
658 
// Copy the I/J arrays of the boolean matrix @a bool_csr into @a mem_csr,
// synthesize a data array of all ones, and point @a hypre_csr at the result.
// Returns ownership flags encoded as in CopyCSR.
signed char HypreParMatrix::CopyBoolCSR(Table *bool_csr,
                                        MemoryIJData &mem_csr,
                                        hypre_CSRMatrix *hypre_csr)
{
   const MemoryClass hypre_mc = GetHypreMemoryClass();
#ifndef HYPRE_BIGINT
   // code for the case HYPRE_Int == int
   CopyMemory(bool_csr->GetIMemory(), mem_csr.I, hypre_mc, false);
   CopyMemory(bool_csr->GetJMemory(), mem_csr.J, hypre_mc, false);
#else
   // code for the case HYPRE_Int == long long int
   CopyConvertMemory(bool_csr->GetIMemory(), hypre_mc, mem_csr.I);
   CopyConvertMemory(bool_csr->GetJMemory(), hypre_mc, mem_csr.J);
#endif
   const int num_rows = bool_csr->Size();
   const int nnz = bool_csr->Size_of_connections();
   // A Table has no values; represent each connection by the value 1.0.
   mem_csr.data.New(nnz, GetHypreMemoryType());
   double *data = mfem::HostWrite(mem_csr.data, nnz);
   for (int i = 0; i < nnz; i++)
   {
      data[i] = 1.0;
   }
   hypre_csr->i = const_cast<HYPRE_Int*>(mem_csr.I.Read(hypre_mc, num_rows+1));
   hypre_csr->j = const_cast<HYPRE_Int*>(mem_csr.J.Read(hypre_mc, nnz));
   hypre_csr->data = const_cast<double*>(mem_csr.data.Read(hypre_mc, nnz));

   MFEM_ASSERT(mem_csr.I.OwnsHostPtr() == mem_csr.J.OwnsHostPtr(),
               "invalid state: host ownership for I and J differ!");
   return (mem_csr.I.OwnsHostPtr() ? 1 : 0) +
          (mem_csr.data.OwnsHostPtr() ? 2 : 0);
}
690 
691 // Copy the j array of a MemoryIJData object to the given dst_J array,
692 // converting the indices from HYPRE_Int to int.
#ifdef HYPRE_BIGINT
// Copy the J array of @a mem_csr into @a dst_J, converting each index from
// HYPRE_Int (long long) to int.
static void CopyCSR_J(const int nnz, const MemoryIJData &mem_csr,
                      Memory<int> &dst_J)
{
   // Perform the copy using the configured mfem Device
   auto src_p = mfem::Read(mem_csr.J, nnz);
   auto dst_p = mfem::Write(dst_J, nnz);
   // The narrowing HYPRE_Int -> int conversion happens per element here.
   mfem::forall(nnz, [=] MFEM_HOST_DEVICE (int i) { dst_p[i] = src_p[i]; });
}
#endif
703 
// static method
// Wrap (or, if necessary, deep-copy) the i/j/data arrays of @a h_mat into
// @a mem so they are usable with hypre's memory class. Returns the ownership
// encoding used by diagOwner/offdOwner: 3 = mem fully owns the arrays;
// -2/-1 = non-owning wrap (distinguished by whether h_mat_mt matches hypre's
// memory type).
signed char HypreParMatrix::HypreCsrToMem(hypre_CSRMatrix *h_mat,
                                          MemoryType h_mat_mt,
                                          bool own_ija,
                                          MemoryIJData &mem)
{
   const int nr1 = internal::to_int(h_mat->num_rows) + 1;
   const int nnz = internal::to_int(h_mat->num_nonzeros);
   mem.I.Wrap(h_mat->i, nr1, h_mat_mt, own_ija);
   mem.J.Wrap(h_mat->j, nnz, h_mat_mt, own_ija);
   mem.data.Wrap(h_mat->data, nnz, h_mat_mt, own_ija);
   const MemoryClass hypre_mc = GetHypreMemoryClass();
   if (!CanShallowCopy(mem.I, hypre_mc))
   {
      // The wrapped arrays cannot be used where hypre needs them: make deep
      // copies in hypre's memory type and release the wrapped arrays.
      const MemoryType hypre_mt = GetHypreMemoryType();
      MemoryIJData h_mem;
      h_mem.I.New(nr1, hypre_mt);
      h_mem.I.CopyFrom(mem.I, nr1);
      mem.I.Delete();
      h_mem.J.New(nnz, hypre_mt);
      h_mem.J.CopyFrom(mem.J, nnz);
      mem.J.Delete();
      h_mem.data.New(nnz, hypre_mt);
      h_mem.data.CopyFrom(mem.data, nnz);
      mem.data.Delete();
      mem = h_mem;
      if (!own_ija)
      {
         // FIXME: Even if own_ija == false, it does not necessarily mean we
         // need to delete h_mat->{i,j,data} even if h_mat->owns_data == true.

         // h_mat owns i; owns j,data if h_mat->owns_data
#if MFEM_HYPRE_VERSION < 21400
         hypre_TFree(h_mat->i);
#elif MFEM_HYPRE_VERSION < 21800
         hypre_TFree(h_mat->i, HYPRE_MEMORY_SHARED);
#else
         hypre_TFree(h_mat->i, h_mat->memory_location);
#endif
         if (h_mat->owns_data)
         {
#if MFEM_HYPRE_VERSION < 21400
            hypre_TFree(h_mat->j);
            hypre_TFree(h_mat->data);
#elif MFEM_HYPRE_VERSION < 21800
            hypre_TFree(h_mat->j, HYPRE_MEMORY_SHARED);
            hypre_TFree(h_mat->data, HYPRE_MEMORY_SHARED);
#else
            hypre_TFree(h_mat->j, h_mat->memory_location);
            hypre_TFree(h_mat->data, h_mat->memory_location);
#endif
         }
      }
      // Point h_mat at the new arrays; hypre no longer owns them.
      h_mat->i = mem.I.ReadWrite(hypre_mc, nr1);
      h_mat->j = mem.J.ReadWrite(hypre_mc, nnz);
      h_mat->data = mem.data.ReadWrite(hypre_mc, nnz);
      h_mat->owns_data = 0;
#if MFEM_HYPRE_VERSION >= 21800
      h_mat->memory_location = HYPRE_MEMORY_DEVICE;
#endif
      return 3;
   }
   return own_ija ? 3 : (h_mat_mt == GetHypreMemoryType() ? -2 : -1);
}
768 
769 // Square block-diagonal constructor (4 arguments, v1)
771  HYPRE_BigInt *row_starts, SparseMatrix *diag)
772  : Operator(diag->Height(), diag->Width())
773 {
774  Init();
775  A = hypre_ParCSRMatrixCreate(comm, glob_size, glob_size, row_starts,
776  row_starts, 0, diag->NumNonZeroElems(), 0);
777  hypre_ParCSRMatrixSetDataOwner(A,1);
778 #if MFEM_HYPRE_VERSION <= 22200
779  hypre_ParCSRMatrixSetRowStartsOwner(A,0);
780  hypre_ParCSRMatrixSetColStartsOwner(A,0);
781 #endif
782 
783  hypre_CSRMatrixSetDataOwner(A->diag,0);
784  diagOwner = CopyCSR(diag, mem_diag, A->diag, false);
785  hypre_CSRMatrixSetRownnz(A->diag);
786 
787  hypre_CSRMatrixSetDataOwner(A->offd,1);
788  hypre_CSRMatrixI(A->offd) = mfem_hypre_CTAlloc(HYPRE_Int, diag->Height()+1);
789  offdOwner = HypreCsrToMem(A->offd, GetHypreMemoryType(), false, mem_offd);
790 
791  /* Don't need to call these, since they allocate memory only
792  if it was not already allocated */
793  // hypre_CSRMatrixInitialize(A->diag);
794  // hypre_ParCSRMatrixInitialize(A);
795 
796  hypre_ParCSRMatrixSetNumNonzeros(A);
797 
798  /* Make sure that the first entry in each row is the diagonal one. */
799  HypreReadWrite();
800  hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
801 #ifdef HYPRE_BIGINT
803  {
804  CopyCSR_J(A->diag->num_nonzeros, mem_diag, diag->GetMemoryJ());
805  }
806 #endif
807 
808  hypre_MatvecCommPkgCreate(A);
809 }
810 
811 // Rectangular block-diagonal constructor (6 arguments, v1)
813  HYPRE_BigInt global_num_rows,
814  HYPRE_BigInt global_num_cols,
815  HYPRE_BigInt *row_starts,
816  HYPRE_BigInt *col_starts,
817  SparseMatrix *diag)
818  : Operator(diag->Height(), diag->Width())
819 {
820  Init();
821  A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
822  row_starts, col_starts,
823  0, diag->NumNonZeroElems(), 0);
824  hypre_ParCSRMatrixSetDataOwner(A,1);
825 #if MFEM_HYPRE_VERSION <= 22200
826  hypre_ParCSRMatrixSetRowStartsOwner(A,0);
827  hypre_ParCSRMatrixSetColStartsOwner(A,0);
828 #endif
829 
830  hypre_CSRMatrixSetDataOwner(A->diag,0);
831  diagOwner = CopyCSR(diag, mem_diag, A->diag, false);
832  hypre_CSRMatrixSetRownnz(A->diag);
833 
834  hypre_CSRMatrixSetDataOwner(A->offd,1);
835  hypre_CSRMatrixI(A->offd) = mfem_hypre_CTAlloc(HYPRE_Int, diag->Height()+1);
836  offdOwner = HypreCsrToMem(A->offd, GetHypreMemoryType(), false, mem_offd);
837 
838  hypre_ParCSRMatrixSetNumNonzeros(A);
839 
840  /* Make sure that the first entry in each row is the diagonal one. */
841  if (row_starts == col_starts)
842  {
843  HypreReadWrite();
844  hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
845 #ifdef HYPRE_BIGINT
847  {
848  CopyCSR_J(A->diag->num_nonzeros, mem_diag, diag->GetMemoryJ());
849  }
850 #endif
851  }
852 
853  hypre_MatvecCommPkgCreate(A);
854 }
855 
856 // General rectangular constructor with diagonal and off-diagonal (8+1
857 // arguments)
859  HYPRE_BigInt global_num_rows,
860  HYPRE_BigInt global_num_cols,
861  HYPRE_BigInt *row_starts,
862  HYPRE_BigInt *col_starts,
863  SparseMatrix *diag, SparseMatrix *offd,
864  HYPRE_BigInt *cmap,
865  bool own_diag_offd)
866  : Operator(diag->Height(), diag->Width())
867 {
868  Init();
869  A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
870  row_starts, col_starts,
871  offd->Width(), diag->NumNonZeroElems(),
872  offd->NumNonZeroElems());
873  hypre_ParCSRMatrixSetDataOwner(A,1);
874 #if MFEM_HYPRE_VERSION <= 22200
875  hypre_ParCSRMatrixSetRowStartsOwner(A,0);
876  hypre_ParCSRMatrixSetColStartsOwner(A,0);
877 #endif
878 
879  hypre_CSRMatrixSetDataOwner(A->diag,0);
880  diagOwner = CopyCSR(diag, mem_diag, A->diag, own_diag_offd);
881  if (own_diag_offd) { delete diag; }
882  hypre_CSRMatrixSetRownnz(A->diag);
883 
884  hypre_CSRMatrixSetDataOwner(A->offd,0);
885  offdOwner = CopyCSR(offd, mem_offd, A->offd, own_diag_offd);
886  if (own_diag_offd) { delete offd; }
887  hypre_CSRMatrixSetRownnz(A->offd);
888 
889  hypre_ParCSRMatrixColMapOffd(A) = cmap;
890  // Prevent hypre from destroying A->col_map_offd
891  colMapOwner = 0;
892 
893  hypre_ParCSRMatrixSetNumNonzeros(A);
894 
895  /* Make sure that the first entry in each row is the diagonal one. */
896  if (row_starts == col_starts)
897  {
898  HypreReadWrite();
899  hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
900 #ifdef HYPRE_BIGINT
902  {
903  CopyCSR_J(A->diag->num_nonzeros, mem_diag, diag->GetMemoryJ());
904  }
905 #endif
906  }
907 
908  hypre_MatvecCommPkgCreate(A);
909 }
910 
911 // General rectangular constructor with diagonal and off-diagonal (13+1
912 // arguments)
914  MPI_Comm comm,
915  HYPRE_BigInt global_num_rows, HYPRE_BigInt global_num_cols,
916  HYPRE_BigInt *row_starts, HYPRE_BigInt *col_starts,
917  HYPRE_Int *diag_i, HYPRE_Int *diag_j, double *diag_data,
918  HYPRE_Int *offd_i, HYPRE_Int *offd_j, double *offd_data,
919  HYPRE_Int offd_num_cols, HYPRE_BigInt *offd_col_map,
920  bool hypre_arrays)
921 {
922  Init();
923  A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
924  row_starts, col_starts, offd_num_cols, 0, 0);
925  hypre_ParCSRMatrixSetDataOwner(A,1);
926 #if MFEM_HYPRE_VERSION <= 22200
927  hypre_ParCSRMatrixSetRowStartsOwner(A,0);
928  hypre_ParCSRMatrixSetColStartsOwner(A,0);
929 #endif
930 
931  HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A->diag);
932 
933  hypre_CSRMatrixSetDataOwner(A->diag, hypre_arrays);
934  hypre_CSRMatrixI(A->diag) = diag_i;
935  hypre_CSRMatrixJ(A->diag) = diag_j;
936  hypre_CSRMatrixData(A->diag) = diag_data;
937  hypre_CSRMatrixNumNonzeros(A->diag) = diag_i[local_num_rows];
938 #ifdef HYPRE_USING_GPU
939  hypre_CSRMatrixMemoryLocation(A->diag) = HYPRE_MEMORY_HOST;
940 #endif
941 
942  hypre_CSRMatrixSetDataOwner(A->offd, hypre_arrays);
943  hypre_CSRMatrixI(A->offd) = offd_i;
944  hypre_CSRMatrixJ(A->offd) = offd_j;
945  hypre_CSRMatrixData(A->offd) = offd_data;
946  hypre_CSRMatrixNumNonzeros(A->offd) = offd_i[local_num_rows];
947 #ifdef HYPRE_USING_GPU
948  hypre_CSRMatrixMemoryLocation(A->offd) = HYPRE_MEMORY_HOST;
949 #endif
950 
951  hypre_ParCSRMatrixColMapOffd(A) = offd_col_map;
952  // Prevent hypre from destroying A->col_map_offd, own A->col_map_offd
953  colMapOwner = hypre_arrays ? -1 : 1;
954 
955  hypre_ParCSRMatrixSetNumNonzeros(A);
956 
957  /* Make sure that the first entry in each row is the diagonal one. */
958  if (row_starts == col_starts)
959  {
960  hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
961  }
962 
963  hypre_MatvecCommPkgCreate(A);
964 
965  height = GetNumRows();
966  width = GetNumCols();
967 
968  if (!hypre_arrays)
969  {
970  const MemoryType host_mt = Device::GetHostMemoryType();
971  diagOwner = HypreCsrToMem(A->diag, host_mt, true, mem_diag);
972  offdOwner = HypreCsrToMem(A->offd, host_mt, true, mem_offd);
973  }
974  else
975  {
976  const MemoryType host_mt = MemoryType::HOST;
977  diagOwner = HypreCsrToMem(A->diag, host_mt, false, mem_diag);
978  offdOwner = HypreCsrToMem(A->offd, host_mt, false, mem_offd);
979  }
980  HypreRead();
981 
982  hypre_CSRMatrixSetRownnz(A->diag);
983  hypre_CSRMatrixSetRownnz(A->offd);
984 }
985 
986 // Constructor from a CSR matrix on rank 0 (4 arguments, v2)
988  HYPRE_BigInt *row_starts,
989  HYPRE_BigInt *col_starts,
990  SparseMatrix *sm_a)
991 {
992  MFEM_ASSERT(sm_a != NULL, "invalid input");
993  MFEM_VERIFY(!HYPRE_AssumedPartitionCheck(),
994  "this method can not be used with assumed partition");
995 
996  Init();
997 
998  hypre_CSRMatrix *csr_a;
999  csr_a = hypre_CSRMatrixCreate(sm_a -> Height(), sm_a -> Width(),
1000  sm_a -> NumNonZeroElems());
1001 
1002  hypre_CSRMatrixSetDataOwner(csr_a,0);
1003  MemoryIJData mem_a;
1004  CopyCSR(sm_a, mem_a, csr_a, false);
1005  hypre_CSRMatrixSetRownnz(csr_a);
1006 
1007  // NOTE: this call creates a matrix on host even when device support is
1008  // enabled in hypre.
1009  hypre_ParCSRMatrix *new_A =
1010  hypre_CSRMatrixToParCSRMatrix(comm, csr_a, row_starts, col_starts);
1011 
1012  mem_a.I.Delete();
1013  mem_a.J.Delete();
1014  mem_a.data.Delete();
1015 
1016  hypre_CSRMatrixI(csr_a) = NULL;
1017  hypre_CSRMatrixDestroy(csr_a);
1018 
1019  /* Make sure that the first entry in each row is the diagonal one. */
1020  if (row_starts == col_starts)
1021  {
1022  hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(new_A));
1023  }
1024 
1025  hypre_MatvecCommPkgCreate(A);
1026 
1027  WrapHypreParCSRMatrix(new_A);
1028 }
1029 
// Boolean, rectangular, block-diagonal constructor (6 arguments, v2)
// Builds a matrix whose local diagonal block has the sparsity pattern of the
// Table 'diag' with all stored values equal to 1, and whose off-diagonal
// block is empty.
// NOTE(review): the leading signature line (original line 1031) is absent
// from this extract -- confirm against the full source.
                               HYPRE_BigInt global_num_rows,
                               HYPRE_BigInt global_num_cols,
                               HYPRE_BigInt *row_starts,
                               HYPRE_BigInt *col_starts,
                               Table *diag)
{
   Init();
   // Total number of stored (boolean) entries of the local diagonal block.
   int nnz = diag->Size_of_connections();
   A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
                                row_starts, col_starts, 0, nnz, 0);
   hypre_ParCSRMatrixSetDataOwner(A,1);
#if MFEM_HYPRE_VERSION <= 22200
   // Older hypre: the partition arrays remain owned by the caller.
   hypre_ParCSRMatrixSetRowStartsOwner(A,0);
   hypre_ParCSRMatrixSetColStartsOwner(A,0);
#endif

   hypre_CSRMatrixSetDataOwner(A->diag,0);
   diagOwner = CopyBoolCSR(diag, mem_diag, A->diag);
   hypre_CSRMatrixSetRownnz(A->diag);

   // Empty off-diagonal block: only the row-pointer array is allocated.
   hypre_CSRMatrixSetDataOwner(A->offd,1);
   hypre_CSRMatrixI(A->offd) = mfem_hypre_CTAlloc(HYPRE_Int, diag->Size()+1);
   offdOwner = HypreCsrToMem(A->offd, GetHypreMemoryType(), false, mem_offd);

   hypre_ParCSRMatrixSetNumNonzeros(A);

   /* Make sure that the first entry in each row is the diagonal one. */
   if (row_starts == col_starts)
   {
      HypreReadWrite();
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
#ifdef HYPRE_BIGINT
      // No need to sync the J array back to the Table diag.
      // CopyCSR_J(A->diag->num_nonzeros, mem_diag, diag->GetJMemory());
#endif
   }

   hypre_MatvecCommPkgCreate(A);

   height = GetNumRows();
   width = GetNumCols();
}
1074 
// Boolean, general rectangular constructor with diagonal and off-diagonal
// (11 arguments)
// The caller supplies raw CSR index arrays for both blocks; all stored
// values are set to 1.
HypreParMatrix::HypreParMatrix(MPI_Comm comm, int id, int np,
                               HYPRE_BigInt *row, HYPRE_BigInt *col,
                               HYPRE_Int *i_diag, HYPRE_Int *j_diag,
                               HYPRE_Int *i_offd, HYPRE_Int *j_offd,
                               HYPRE_BigInt *cmap, HYPRE_Int cmap_size)
{
   HYPRE_Int diag_nnz, offd_nnz;

   Init();
   // The local nnz is the last entry of each row-pointer array; the global
   // sizes are read from the partition arrays according to the convention.
   if (HYPRE_AssumedPartitionCheck())
   {
      diag_nnz = i_diag[row[1]-row[0]];
      offd_nnz = i_offd[row[1]-row[0]];

      A = hypre_ParCSRMatrixCreate(comm, row[2], col[2], row, col,
                                   cmap_size, diag_nnz, offd_nnz);
   }
   else
   {
      diag_nnz = i_diag[row[id+1]-row[id]];
      offd_nnz = i_offd[row[id+1]-row[id]];

      A = hypre_ParCSRMatrixCreate(comm, row[np], col[np], row, col,
                                   cmap_size, diag_nnz, offd_nnz);
   }

   hypre_ParCSRMatrixSetDataOwner(A,1);
#if MFEM_HYPRE_VERSION <= 22200
   // Older hypre: the partition arrays remain owned by the caller.
   hypre_ParCSRMatrixSetRowStartsOwner(A,0);
   hypre_ParCSRMatrixSetColStartsOwner(A,0);
#endif

   // Boolean matrix: every stored entry is 1.
   mem_diag.data.New(diag_nnz);
   for (HYPRE_Int i = 0; i < diag_nnz; i++)
   {
      mem_diag.data[i] = 1.0;
   }

   mem_offd.data.New(offd_nnz);
   for (HYPRE_Int i = 0; i < offd_nnz; i++)
   {
      mem_offd.data[i] = 1.0;
   }

   // Alias the caller's index arrays (hypre does not own them).
   hypre_CSRMatrixSetDataOwner(A->diag,0);
   hypre_CSRMatrixI(A->diag) = i_diag;
   hypre_CSRMatrixJ(A->diag) = j_diag;
   hypre_CSRMatrixData(A->diag) = mem_diag.data;
#ifdef HYPRE_USING_GPU
   // The input arrays live on the host.
   hypre_CSRMatrixMemoryLocation(A->diag) = HYPRE_MEMORY_HOST;
#endif

   hypre_CSRMatrixSetDataOwner(A->offd,0);
   hypre_CSRMatrixI(A->offd) = i_offd;
   hypre_CSRMatrixJ(A->offd) = j_offd;
   hypre_CSRMatrixData(A->offd) = mem_offd.data;
#ifdef HYPRE_USING_GPU
   hypre_CSRMatrixMemoryLocation(A->offd) = HYPRE_MEMORY_HOST;
#endif

   hypre_ParCSRMatrixColMapOffd(A) = cmap;
   // Prevent hypre from destroying A->col_map_offd, own A->col_map_offd
   colMapOwner = 1;

   hypre_ParCSRMatrixSetNumNonzeros(A);

   /* Make sure that the first entry in each row is the diagonal one. */
   if (row == col)
   {
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   }

   hypre_MatvecCommPkgCreate(A);

   height = GetNumRows();
   width = GetNumCols();

   // Register the arrays with MFEM's memory manager, then move the matrix to
   // hypre's preferred memory space.
   const MemoryType host_mt = Device::GetHostMemoryType();
   diagOwner = HypreCsrToMem(A->diag, host_mt, true, mem_diag);
   offdOwner = HypreCsrToMem(A->offd, host_mt, true, mem_offd);
   HypreRead();

   hypre_CSRMatrixSetRownnz(A->diag);
   hypre_CSRMatrixSetRownnz(A->offd);
}
1162 
// General rectangular constructor with diagonal and off-diagonal constructed
// from a CSR matrix that contains both diagonal and off-diagonal blocks
// (9 arguments)
HypreParMatrix::HypreParMatrix(MPI_Comm comm, int nrows,
                               HYPRE_BigInt glob_nrows,
                               HYPRE_BigInt glob_ncols,
                               int *I, HYPRE_BigInt *J,
                               double *data,
                               HYPRE_BigInt *rows,
                               HYPRE_BigInt *cols)
{
   Init();

   // Determine partitioning size, and my column start and end
   int part_size;
   HYPRE_BigInt my_col_start, my_col_end; // my range: [my_col_start, my_col_end)
   if (HYPRE_AssumedPartitionCheck())
   {
      part_size = 2;
      my_col_start = cols[0];
      my_col_end = cols[1];
   }
   else
   {
      int myid;
      MPI_Comm_rank(comm, &myid);
      MPI_Comm_size(comm, &part_size);
      part_size++;
      my_col_start = cols[myid];
      my_col_end = cols[myid+1];
   }

   // Copy in the row and column partitionings
   HYPRE_BigInt *row_starts, *col_starts;
   if (rows == cols)
   {
      // Shared partition: allocate one array and alias it for both.
      row_starts = col_starts = mfem_hypre_TAlloc_host(HYPRE_BigInt, part_size);
      for (int i = 0; i < part_size; i++)
      {
         row_starts[i] = rows[i];
      }
   }
   else
   {
      row_starts = mfem_hypre_TAlloc_host(HYPRE_BigInt, part_size);
      col_starts = mfem_hypre_TAlloc_host(HYPRE_BigInt, part_size);
      for (int i = 0; i < part_size; i++)
      {
         row_starts[i] = rows[i];
         col_starts[i] = cols[i];
      }
   }

   // Create a map for the off-diagonal indices - global to local. Count the
   // number of diagonal and off-diagonal entries.
   HYPRE_Int diag_nnz = 0, offd_nnz = 0, offd_num_cols = 0;
   map<HYPRE_BigInt, HYPRE_Int> offd_map;
   for (HYPRE_Int j = 0, loc_nnz = I[nrows]; j < loc_nnz; j++)
   {
      HYPRE_BigInt glob_col = J[j];
      if (my_col_start <= glob_col && glob_col < my_col_end)
      {
         diag_nnz++;
      }
      else
      {
         offd_map.insert(pair<const HYPRE_BigInt, HYPRE_Int>(glob_col, -1));
         offd_nnz++;
      }
   }
   // count the number of columns in the off-diagonal and set the local indices
   // (std::map iterates in key order, so the local indices are assigned in
   // increasing global-column order)
   for (auto it = offd_map.begin(); it != offd_map.end(); ++it)
   {
      it->second = offd_num_cols++;
   }

   // construct the global ParCSR matrix
   A = hypre_ParCSRMatrixCreate(comm, glob_nrows, glob_ncols,
                                row_starts, col_starts, offd_num_cols,
                                diag_nnz, offd_nnz);
   hypre_ParCSRMatrixInitialize(A);

   diagOwner = HypreCsrToMem(A->diag, GetHypreMemoryType(), false, mem_diag);
   offdOwner = HypreCsrToMem(A->offd, GetHypreMemoryType(), false, mem_offd);
   HostWrite(); // the blocks are filled on the host below

   HYPRE_Int *diag_i, *diag_j, *offd_i, *offd_j;
   HYPRE_BigInt *offd_col_map;
   double *diag_data, *offd_data;
   diag_i = A->diag->i;
   diag_j = A->diag->j;
   diag_data = A->diag->data;
   offd_i = A->offd->i;
   offd_j = A->offd->j;
   offd_data = A->offd->data;
   offd_col_map = A->col_map_offd;

   // Second pass: distribute the (I,J,data) entries into diag and offd.
   diag_nnz = offd_nnz = 0;
   for (HYPRE_Int i = 0, j = 0; i < nrows; i++)
   {
      diag_i[i] = diag_nnz;
      offd_i[i] = offd_nnz;
      for (HYPRE_Int j_end = I[i+1]; j < j_end; j++)
      {
         HYPRE_BigInt glob_col = J[j];
         if (my_col_start <= glob_col && glob_col < my_col_end)
         {
            diag_j[diag_nnz] = glob_col - my_col_start;
            diag_data[diag_nnz] = data[j];
            diag_nnz++;
         }
         else
         {
            offd_j[offd_nnz] = offd_map[glob_col];
            offd_data[offd_nnz] = data[j];
            offd_nnz++;
         }
      }
   }
   diag_i[nrows] = diag_nnz;
   offd_i[nrows] = offd_nnz;
   // Invert the global-to-local map into the local-to-global column map.
   for (auto it = offd_map.begin(); it != offd_map.end(); ++it)
   {
      offd_col_map[it->second] = it->first;
   }

   hypre_ParCSRMatrixSetNumNonzeros(A);
   /* Make sure that the first entry in each row is the diagonal one. */
   if (row_starts == col_starts)
   {
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   }
#if MFEM_HYPRE_VERSION > 22200
   // Newer hypre keeps internal copies of the partitions; free ours.
   mfem_hypre_TFree_host(row_starts);
   if (rows != cols)
   {
      mfem_hypre_TFree_host(col_starts);
   }
#endif
   hypre_MatvecCommPkgCreate(A);

   height = GetNumRows();
   width = GetNumCols();

   HypreRead(); // move the populated matrix to hypre's memory space
}
1309 
// Deep-copy constructor: clones the structure of 'P' and copies its values.
// NOTE(review): the signature line (original line 1310) is absent from this
// extract -- confirm against the full source.
{
   hypre_ParCSRMatrix *Ph = static_cast<hypre_ParCSRMatrix *>(P);

   Init();

   // Clone the structure
   A = hypre_ParCSRMatrixCompleteClone(Ph);
   // Make a deep copy of the data from the source
   hypre_ParCSRMatrixCopy(Ph, A, 1);

   height = GetNumRows();
   width = GetNumCols();

   // Take private copies of the partition arrays (pre-2.23 hypre only).
   CopyRowStarts();
   CopyColStarts();

   hypre_ParCSRMatrixSetNumNonzeros(A);

   hypre_MatvecCommPkgCreate(A);

   // Register the hypre diag/offd arrays with MFEM's memory manager.
   diagOwner = HypreCsrToMem(A->diag, GetHypreMemoryType(), false, mem_diag);
   offdOwner = HypreCsrToMem(A->offd, GetHypreMemoryType(), false, mem_offd);
}
1334 
// Make this object a non-owning reference (alias) to 'master': shares the
// hypre matrix and aliases the tracked diag/offd memory.
// NOTE(review): the signature line (original line 1335) is absent from this
// extract -- confirm against the full source.
{
   Destroy();
   Init();
   A = master.A;
   // This object must not destroy the shared hypre matrix.
   ParCSROwner = 0;
   height = master.GetNumRows();
   width = master.GetNumCols();
   // Alias (do not copy) the master's tracked CSR arrays.
   mem_diag.I.MakeAlias(master.mem_diag.I, 0, master.mem_diag.I.Capacity());
   mem_diag.J.MakeAlias(master.mem_diag.J, 0, master.mem_diag.J.Capacity());
   mem_diag.data.MakeAlias(master.mem_diag.data, 0,
                           master.mem_diag.data.Capacity());
   mem_offd.I.MakeAlias(master.mem_offd.I, 0, master.mem_offd.I.Capacity());
   mem_offd.J.MakeAlias(master.mem_offd.J, 0, master.mem_offd.J.Capacity());
   mem_offd.data.MakeAlias(master.mem_offd.data, 0,
                           master.mem_offd.data.Capacity());
}
1352 
1353 hypre_ParCSRMatrix* HypreParMatrix::StealData()
1354 {
1355  // Only safe when (diagOwner < 0 && offdOwner < 0 && colMapOwner == -1)
1356  // Otherwise, there may be memory leaks or hypre may destroy arrays allocated
1357  // with operator new.
1358  MFEM_ASSERT(diagOwner < 0 && offdOwner < 0 && colMapOwner == -1, "");
1359  MFEM_ASSERT(diagOwner == offdOwner, "");
1360  MFEM_ASSERT(ParCSROwner, "");
1361  hypre_ParCSRMatrix *R = A;
1362 #ifdef HYPRE_USING_GPU
1363  if (diagOwner == -1) { HostReadWrite(); }
1364  else { HypreReadWrite(); }
1365 #endif
1366  ParCSROwner = false;
1367  Destroy();
1368  Init();
1369  return R;
1370 }
1371 
1372 void HypreParMatrix::SetOwnerFlags(signed char diag, signed char offd,
1373  signed char colmap)
1374 {
1375  diagOwner = diag;
1376  mem_diag.I.SetHostPtrOwner((diag >= 0) && (diag & 1));
1377  mem_diag.I.SetDevicePtrOwner((diag >= 0) && (diag & 1));
1378 
1379  mem_diag.J.SetHostPtrOwner((diag >= 0) && (diag & 1));
1380  mem_diag.J.SetDevicePtrOwner((diag >= 0) && (diag & 1));
1381 
1382  mem_diag.data.SetHostPtrOwner((diag >= 0) && (diag & 2));
1383  mem_diag.data.SetDevicePtrOwner((diag >= 0) && (diag & 2));
1384 
1385  offdOwner = offd;
1386  mem_offd.I.SetHostPtrOwner((offd >= 0) && (offd & 1));
1387  mem_offd.J.SetHostPtrOwner((offd >= 0) && (offd & 1));
1388 
1389  mem_offd.I.SetDevicePtrOwner((offd >= 0) && (offd & 1));
1390  mem_offd.J.SetDevicePtrOwner((offd >= 0) && (offd & 1));
1391 
1392  mem_offd.data.SetHostPtrOwner((offd >= 0) && (offd & 2));
1393  mem_offd.data.SetDevicePtrOwner((offd >= 0) && (offd & 2));
1394  colMapOwner = colmap;
1395 }
1396 
// Take a private, owned copy of the row-partition array (pre-2.23 hypre
// only; newer hypre stores the partitions internally).
// NOTE(review): the signature line (original line 1397) is absent from this
// extract -- confirm against the full source.
{
#if MFEM_HYPRE_VERSION <= 22200
   // Nothing to do if the matrix already owns its row starts, directly or
   // through the col starts when the two arrays coincide.
   if (!A || hypre_ParCSRMatrixOwnsRowStarts(A) ||
       (hypre_ParCSRMatrixRowStarts(A) == hypre_ParCSRMatrixColStarts(A) &&
        hypre_ParCSRMatrixOwnsColStarts(A)))
   {
      return;
   }

   int row_starts_size;
   if (HYPRE_AssumedPartitionCheck())
   {
      row_starts_size = 2;
   }
   else
   {
      MPI_Comm_size(hypre_ParCSRMatrixComm(A), &row_starts_size);
      row_starts_size++; // num_proc + 1
   }

   HYPRE_BigInt *old_row_starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_BigInt *new_row_starts = mfem_hypre_CTAlloc_host(HYPRE_BigInt,
                                                          row_starts_size);
   for (int i = 0; i < row_starts_size; i++)
   {
      new_row_starts[i] = old_row_starts[i];
   }

   hypre_ParCSRMatrixRowStarts(A) = new_row_starts;
   hypre_ParCSRMatrixOwnsRowStarts(A) = 1;

   // If the col starts aliased the old row starts, keep them aliased with
   // the new array (owned once, through the row starts).
   if (hypre_ParCSRMatrixColStarts(A) == old_row_starts)
   {
      hypre_ParCSRMatrixColStarts(A) = new_row_starts;
      hypre_ParCSRMatrixOwnsColStarts(A) = 0;
   }
#endif
}
1436 
// Take a private, owned copy of the column-partition array (pre-2.23 hypre
// only; newer hypre stores the partitions internally).
// NOTE(review): the signature line (original line 1437) is absent from this
// extract -- confirm against the full source.
{
#if MFEM_HYPRE_VERSION <= 22200
   // Nothing to do if the matrix already owns its col starts, directly or
   // through the row starts when the two arrays coincide.
   if (!A || hypre_ParCSRMatrixOwnsColStarts(A) ||
       (hypre_ParCSRMatrixRowStarts(A) == hypre_ParCSRMatrixColStarts(A) &&
        hypre_ParCSRMatrixOwnsRowStarts(A)))
   {
      return;
   }

   int col_starts_size;
   if (HYPRE_AssumedPartitionCheck())
   {
      col_starts_size = 2;
   }
   else
   {
      MPI_Comm_size(hypre_ParCSRMatrixComm(A), &col_starts_size);
      col_starts_size++; // num_proc + 1
   }

   HYPRE_BigInt *old_col_starts = hypre_ParCSRMatrixColStarts(A);
   HYPRE_BigInt *new_col_starts = mfem_hypre_CTAlloc_host(HYPRE_BigInt,
                                                          col_starts_size);
   for (int i = 0; i < col_starts_size; i++)
   {
      new_col_starts[i] = old_col_starts[i];
   }

   hypre_ParCSRMatrixColStarts(A) = new_col_starts;

   // If the row starts aliased the old col starts, share the new array and
   // let the row starts carry the ownership flag.
   if (hypre_ParCSRMatrixRowStarts(A) == old_col_starts)
   {
      hypre_ParCSRMatrixRowStarts(A) = new_col_starts;
      hypre_ParCSRMatrixOwnsRowStarts(A) = 1;
      hypre_ParCSRMatrixOwnsColStarts(A) = 0;
   }
   else
   {
      hypre_ParCSRMatrixOwnsColStarts(A) = 1;
   }
#endif
}
1480 
// Extract the diagonal entries of the matrix into the Vector 'diag',
// assuming the first entry of each row of the diag block is the diagonal.
// NOTE(review): the signature line (original line 1481) and the GPU-path
// condition line (original line 1486) are absent from this extract --
// confirm against the full source.
{
   const int size = Height();
   diag.SetSize(size);
#ifdef HYPRE_USING_GPU
   {
      MFEM_ASSERT(A->diag->memory_location == HYPRE_MEMORY_DEVICE, "");
      // Device path: gather the first entry of each row on the device.
      double *d_diag = diag.Write();
      const HYPRE_Int *A_diag_i = A->diag->i;
      const double *A_diag_d = A->diag->data;
      mfem::forall(size, [=] MFEM_HOST_DEVICE (int i)
      {
         d_diag[i] = A_diag_d[A_diag_i[i]];
      });
   }
   else
#endif
   {
      // Host path: read the first entry of each row, verifying in debug mode
      // that it really is the diagonal entry.
      diag.HostWrite();
      HostRead();
      for (int j = 0; j < size; j++)
      {
         diag(j) = A->diag->data[A->diag->i[j]];
         MFEM_ASSERT(A->diag->j[A->diag->i[j]] == j,
                     "the first entry in each row must be the diagonal one");
      }
      HypreRead();
   }
}
1511 
1512 static void MakeSparseMatrixWrapper(int nrows, int ncols,
1513  HYPRE_Int *I, HYPRE_Int *J, double *data,
1514  SparseMatrix &wrapper)
1515 {
1516 #ifndef HYPRE_BIGINT
1517  SparseMatrix tmp(I, J, data, nrows, ncols, false, false, false);
1518 #else
1519  int *mI = Memory<int>(nrows + 1);
1520  for (int i = 0; i <= nrows; i++)
1521  {
1522  mI[i] = internal::to_int(I[i]); // checks for overflow in debug mode
1523  }
1524  const int nnz = mI[nrows];
1525  int *mJ = Memory<int>(nnz);
1526  for (int j = 0; j < nnz; j++)
1527  {
1528  mJ[j] = internal::to_int(J[j]); // checks for overflow in debug mode
1529  }
1530  SparseMatrix tmp(mI, mJ, data, nrows, ncols, true, false, false);
1531 #endif
1532  wrapper.Swap(tmp);
1533 }
1534 
1535 static void MakeWrapper(const hypre_CSRMatrix *mat,
1536  const MemoryIJData &mem,
1537  SparseMatrix &wrapper)
1538 {
1539  const int nrows = internal::to_int(hypre_CSRMatrixNumRows(mat));
1540  const int ncols = internal::to_int(hypre_CSRMatrixNumCols(mat));
1541  const int nnz = internal::to_int(mat->num_nonzeros);
1542  const HYPRE_Int *I = mfem::HostRead(mem.I, nrows + 1);
1543  const HYPRE_Int *J = mfem::HostRead(mem.J, nnz);
1544  const double *data = mfem::HostRead(mem.data, nnz);
1545  MakeSparseMatrixWrapper(nrows, ncols,
1546  const_cast<HYPRE_Int*>(I),
1547  const_cast<HYPRE_Int*>(J),
1548  const_cast<double*>(data),
1549  wrapper);
1550 }
1551 
// Wrap the local diagonal block as a SparseMatrix (no data copy).
// NOTE(review): the signature line (original line 1552) is absent from this
// extract -- confirm against the full source.
{
   MakeWrapper(A->diag, mem_diag, diag);
}
1556 
// Wrap the local off-diagonal block as a SparseMatrix (no data copy) and
// return the local-to-global column map through 'cmap'.
// NOTE(review): the signature line (original line 1557) is absent from this
// extract -- confirm against the full source.
{
   MakeWrapper(A->offd, mem_offd, offd);
   cmap = A->col_map_offd;
}
1562 
// Merge the diag and offd blocks into a single local SparseMatrix with
// global column indices; 'merged' receives a deep copy.
// NOTE(review): the signature line (original line 1563) is absent from this
// extract -- confirm against the full source.
{
   HostRead(); // the hypre merge routine works on host data
   hypre_CSRMatrix *hypre_merged = hypre_MergeDiagAndOffd(A);
   HypreRead();
   // Wrap 'hypre_merged' as a SparseMatrix 'merged_tmp'
   SparseMatrix merged_tmp;
#if MFEM_HYPRE_VERSION >= 21600
   // Convert the big-int column indices to HYPRE_Int in place.
   hypre_CSRMatrixBigJtoJ(hypre_merged);
#endif
   MakeSparseMatrixWrapper(
      internal::to_int(hypre_merged->num_rows),
      internal::to_int(hypre_merged->num_cols),
      hypre_merged->i,
      hypre_merged->j,
      hypre_merged->data,
      merged_tmp);
   // Deep copy 'merged_tmp' to 'merged' so that 'merged' does not need
   // 'hypre_merged'
   merged = merged_tmp;
   merged_tmp.Clear();
   hypre_CSRMatrixDestroy(hypre_merged);
}
1586 
// Split this matrix into an nr x nc grid of HypreParMatrix blocks, returned
// through the pre-sized 2D array 'blocks'; the caller owns the new blocks.
// NOTE(review): the leading signature line (original line 1587) is absent
// from this extract -- confirm against the full source.
                               bool interleaved_rows,
                               bool interleaved_cols) const
{
   int nr = blocks.NumRows();
   int nc = blocks.NumCols();

   hypre_ParCSRMatrix **hypre_blocks = new hypre_ParCSRMatrix*[nr * nc];
   HostRead(); // the split routine works on host data
   internal::hypre_ParCSRMatrixSplit(A, nr, nc, hypre_blocks,
                                     interleaved_rows, interleaved_cols);
   HypreRead();

   // Wrap each hypre block in a HypreParMatrix that takes ownership of it.
   for (int i = 0; i < nr; i++)
   {
      for (int j = 0; j < nc; j++)
      {
         blocks[i][j] = new HypreParMatrix(hypre_blocks[i*nc + j]);
      }
   }

   delete [] hypre_blocks;
}
1610 
// Return the parallel transpose of this matrix; the caller owns the result.
// NOTE(review): the signature line (original line 1611) is absent from this
// extract -- confirm against the full source.
{
   hypre_ParCSRMatrix * At;
   // Third argument != 0: per hypre's API this also transposes the values,
   // not just the sparsity pattern.
   hypre_ParCSRMatrixTranspose(A, &At, 1);
   hypre_ParCSRMatrixSetNumNonzeros(At);

   if (!hypre_ParCSRMatrixCommPkg(At)) { hypre_MatvecCommPkgCreate(At); }

   if ( M() == N() )
   {
      /* If the matrix is square, make sure that the first entry in each
         row is the diagonal one. */
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(At));
   }

   return new HypreParMatrix(At);
}
1628 
#if MFEM_HYPRE_VERSION >= 21800
// Extract the submatrix corresponding to 'indices' as a new HypreParMatrix,
// dropping entries with magnitude below 'threshold' (per the hypre "FC"
// extraction routine). The caller owns the result.
// NOTE(review): the leading signature line (original line 1630) is absent
// from this extract -- confirm against the full source.
                                                 double threshold) const
{
   // hypre_ParCSRMatrixExtractSubmatrixFC works on host only, so we move this
   // matrix to host, temporarily:
   HostRead();

   if (!(A->comm))
   {
      hypre_MatvecCommPkgCreate(A);
   }

   hypre_ParCSRMatrix *submat;

   // Get number of rows stored on this processor
   int local_num_vars = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));

   // Form hypre CF-splitting array designating submatrix as F-points (-1)
#ifdef hypre_IntArrayData
   // hypre_BoomerAMGCoarseParms needs CF_marker to be hypre_IntArray *
   hypre_IntArray *CF_marker;

   CF_marker = hypre_IntArrayCreate(local_num_vars);
   hypre_IntArrayInitialize_v2(CF_marker, HYPRE_MEMORY_HOST);
   hypre_IntArraySetConstantValues(CF_marker, 1);
#else
   Array<HYPRE_Int> CF_marker(local_num_vars);
   CF_marker = 1;
#endif
   // Mark the requested rows/columns as F-points (-1).
   for (int j=0; j<indices.Size(); j++)
   {
      if (indices[j] > local_num_vars)
      {
         MFEM_WARNING("WARNING : " << indices[j] << " > " << local_num_vars);
      }
#ifdef hypre_IntArrayData
      hypre_IntArrayData(CF_marker)[indices[j]] = -1;
#else
      CF_marker[indices[j]] = -1;
#endif
   }

   // Construct cpts_global array on hypre matrix structure
#if (MFEM_HYPRE_VERSION > 22300) || (MFEM_HYPRE_VERSION == 22300 && HYPRE_DEVELOP_NUMBER >=8)
   HYPRE_BigInt cpts_global[2];

   hypre_BoomerAMGCoarseParms(MPI_COMM_WORLD, local_num_vars, 1, NULL,
                              CF_marker, NULL, cpts_global);
#else
   HYPRE_BigInt *cpts_global;
   hypre_BoomerAMGCoarseParms(MPI_COMM_WORLD, local_num_vars, 1, NULL,
                              CF_marker, NULL, &cpts_global);
#endif

   // Extract submatrix into *submat
#ifdef hypre_IntArrayData
   hypre_ParCSRMatrixExtractSubmatrixFC(A, hypre_IntArrayData(CF_marker),
                                        cpts_global, "FF", &submat,
                                        threshold);
#else
   hypre_ParCSRMatrixExtractSubmatrixFC(A, CF_marker, cpts_global,
                                        "FF", &submat, threshold);
#endif

   // Older hypre allocates cpts_global; free it.
#if (MFEM_HYPRE_VERSION <= 22300) && !(MFEM_HYPRE_VERSION == 22300 && HYPRE_DEVELOP_NUMBER >=8)
   mfem_hypre_TFree(cpts_global);
#endif
#ifdef hypre_IntArrayData
   hypre_IntArrayDestroy(CF_marker);
#endif

   HypreRead(); // restore the matrix location to the default hypre location

   return new HypreParMatrix(submat);
}
#endif
1706 
// Ensure hypre's cached local transposes exist (GPU builds with hypre
// >= 2.25.0-dev1 only; a no-op otherwise).
// NOTE(review): the signature line (original line 1707) is absent from this
// extract -- confirm against the full source.
{
#if (MFEM_HYPRE_VERSION == 22500 && HYPRE_DEVELOP_NUMBER >= 1) || \
    (MFEM_HYPRE_VERSION > 22500)
#ifdef HYPRE_USING_GPU
   hypre_ParCSRMatrixLocalTranspose(A);
#endif
#endif
}
1716 
// Free the cached local transposes (diagT/offdT), e.g. after the matrix
// values change (GPU builds with hypre >= 2.25.0-dev1 only).
// NOTE(review): the signature line (original line 1717) is absent from this
// extract -- confirm against the full source.
{
#if (MFEM_HYPRE_VERSION == 22500 && HYPRE_DEVELOP_NUMBER >= 1) || \
    (MFEM_HYPRE_VERSION > 22500)
#ifdef HYPRE_USING_GPU
   if (A->diagT)
   {
      hypre_CSRMatrixDestroy(A->diagT);
      A->diagT = NULL;
   }
   if (A->offdT)
   {
      hypre_CSRMatrixDestroy(A->offdT);
      A->offdT = NULL;
   }
#endif
#endif
}
1735 
// Computes y = a*A*x + b*y using HypreParVector arguments.
// NOTE(review): the leading signature line (original line 1736) is absent
// from this extract -- confirm against the full source.
                           double a, double b) const
{
   x.HypreRead();
   // When b == 0, y's old values are irrelevant: wrap it write-only.
   (b == 0.0) ? y.HypreWrite() : y.HypreReadWrite();
   return hypre_ParCSRMatrixMatvec(a, A, x, b, y);
}
1743 
// Computes y = a*A*x + b*y. Vectors that are not accessible in hypre's
// memory space are staged through the auxiliary buffers auxX/auxY.
void HypreParMatrix::Mult(double a, const Vector &x, double b, Vector &y) const
{
   MFEM_ASSERT(x.Size() == Width(), "invalid x.Size() = " << x.Size()
               << ", expected size = " << Width());
   MFEM_ASSERT(y.Size() == Height(), "invalid y.Size() = " << y.Size()
               << ", expected size = " << Height());

   if (X == NULL)
   {
      // Lazily create the hypre-side wrapper vectors; they alias user data.
      X = new HypreParVector(A->comm,
                             GetGlobalNumCols(),
                             nullptr,
                             GetColStarts());
      Y = new HypreParVector(A->comm,
                             GetGlobalNumRows(),
                             nullptr,
                             GetRowStarts());
   }

   // A vector can be wrapped in place only if its memory is accessible in
   // hypre's memory class.
   const bool xshallow = CanShallowCopy(x.GetMemory(), GetHypreMemoryClass());
   const bool yshallow = CanShallowCopy(y.GetMemory(), GetHypreMemoryClass());

   if (xshallow)
   {
      X->WrapMemoryRead(x.GetMemory());
   }
   else
   {
      if (auxX.Empty()) { auxX.New(NumCols(), GetHypreMemoryType()); }
      auxX.CopyFrom(x.GetMemory(), auxX.Capacity()); // Deep copy
      X->WrapMemoryRead(auxX);
   }

   if (yshallow)
   {
      // When b == 0, y's old values are irrelevant: wrap it write-only.
      if (b != 0.0) { Y->WrapMemoryReadWrite(y.GetMemory()); }
      else { Y->WrapMemoryWrite(y.GetMemory()); }
   }
   else
   {
      if (auxY.Empty()) { auxY.New(NumRows(), GetHypreMemoryType()); }
      if (b != 0.0)
      {
         auxY.CopyFrom(y.GetMemory(), auxY.Capacity()); // Deep copy
         Y->WrapMemoryReadWrite(auxY);
      }
      else
      {
         Y->WrapMemoryWrite(auxY);
      }
   }

   hypre_ParCSRMatrixMatvec(a, A, *X, b, *Y);

   if (!yshallow) { y = *Y; } // Deep copy
}
1800 
// Computes y = a*A^T*x + b*y. Vectors not accessible in hypre's memory
// space are staged through the auxiliary buffers.
// NOTE(review): the leading signature line (original line 1801) is absent
// from this extract -- confirm against the full source.
                                double b, Vector &y) const
{
   MFEM_ASSERT(x.Size() == Height(), "invalid x.Size() = " << x.Size()
               << ", expected size = " << Height());
   MFEM_ASSERT(y.Size() == Width(), "invalid y.Size() = " << y.Size()
               << ", expected size = " << Width());

   // Note: x has the dimensions of Y (height), and
   // y has the dimensions of X (width)
   if (X == NULL)
   {
      // Lazily create the hypre-side wrapper vectors; they alias user data.
      X = new HypreParVector(A->comm,
                             GetGlobalNumCols(),
                             nullptr,
                             GetColStarts());
      Y = new HypreParVector(A->comm,
                             GetGlobalNumRows(),
                             nullptr,
                             GetRowStarts());
   }

   const bool xshallow = CanShallowCopy(x.GetMemory(), GetHypreMemoryClass());
   const bool yshallow = CanShallowCopy(y.GetMemory(), GetHypreMemoryClass());

   // x <--> Y
   if (xshallow)
   {
      Y->WrapMemoryRead(x.GetMemory());
   }
   else
   {
      if (auxY.Empty()) { auxY.New(NumRows(), GetHypreMemoryType()); }
      auxY.CopyFrom(x.GetMemory(), auxY.Capacity()); // Deep copy
      Y->WrapMemoryRead(auxY);
   }

   // y <--> X
   if (yshallow)
   {
      // When b == 0, y's old values are irrelevant: wrap it write-only.
      if (b != 0.0) { X->WrapMemoryReadWrite(y.GetMemory()); }
      else { X->WrapMemoryWrite(y.GetMemory()); }
   }
   else
   {
      if (auxX.Empty()) { auxX.New(NumCols(), GetHypreMemoryType()); }
      if (b != 0.0)
      {
         auxX.CopyFrom(y.GetMemory(), auxX.Capacity()); // Deep copy
         X->WrapMemoryReadWrite(auxX);
      }
      else
      {
         X->WrapMemoryWrite(auxX);
      }
   }

   // NOTE(review): a line (original line 1858) is absent from this extract
   // here -- confirm against the full source.

   hypre_ParCSRMatrixMatvecT(a, A, *Y, b, *X);

   if (!yshallow) { y = *X; } // Deep copy
}
1864 
1865 HYPRE_Int HypreParMatrix::Mult(HYPRE_ParVector x, HYPRE_ParVector y,
1866  double a, double b) const
1867 {
1868  return hypre_ParCSRMatrixMatvec(a, A, (hypre_ParVector *) x, b,
1869  (hypre_ParVector *) y);
1870 }
1871 
// Computes y = a*A^T*x + b*y using HypreParVector arguments.
// NOTE(review): the leading signature line (original line 1872) and the
// line at original 1875 are absent from this extract -- confirm against the
// full source.
                                    double a, double b) const
{
   x.HypreRead();
   // When b == 0, y's old values are irrelevant: wrap it write-only.
   (b == 0.0) ? y.HypreWrite() : y.HypreReadWrite();
   return hypre_ParCSRMatrixMatvecT(a, A, x, b, y);
}
1880 
1881 void HypreParMatrix::AbsMult(double a, const Vector &x,
1882  double b, Vector &y) const
1883 {
1884  MFEM_ASSERT(x.Size() == Width(), "invalid x.Size() = " << x.Size()
1885  << ", expected size = " << Width());
1886  MFEM_ASSERT(y.Size() == Height(), "invalid y.Size() = " << y.Size()
1887  << ", expected size = " << Height());
1888 
1889  auto x_data = x.HostRead();
1890  auto y_data = (b == 0.0) ? y.HostWrite() : y.HostReadWrite();
1891 
1892  HostRead();
1893  internal::hypre_ParCSRMatrixAbsMatvec(A, a, const_cast<double*>(x_data),
1894  b, y_data);
1895  HypreRead();
1896 }
1897 
// Computes y = a*|A|^T*x + b*y on the host, where |A| denotes the matrix of
// the absolute values of the entries of A.
// NOTE(review): the leading signature line (original line 1898) is absent
// from this extract -- confirm against the full source.
                                      double b, Vector &y) const
{
   MFEM_ASSERT(x.Size() == Height(), "invalid x.Size() = " << x.Size()
               << ", expected size = " << Height());
   MFEM_ASSERT(y.Size() == Width(), "invalid y.Size() = " << y.Size()
               << ", expected size = " << Width());

   auto x_data = x.HostRead();
   // When b == 0, y's old values are irrelevant: open it write-only.
   auto y_data = (b == 0.0) ? y.HostWrite() : y.HostReadWrite();

   HostRead();
   internal::hypre_ParCSRMatrixAbsMatvecT(A, a, const_cast<double*>(x_data),
                                          b, y_data);
   HypreRead();
}
1914 
// Multiply this matrix from the left by the (local) block-diagonal matrix
// built from D: returns a new HypreParMatrix DA with DA = diag(D) * A.
// NOTE(review): the leading signature line (original line 1915) is absent
// from this extract -- confirm against the full source.
                                             HYPRE_BigInt* row_starts) const
{
   const bool assumed_partition = HYPRE_AssumedPartitionCheck();
   const bool row_starts_given = (row_starts != NULL);
   if (!row_starts_given)
   {
      row_starts = hypre_ParCSRMatrixRowStarts(A);
      MFEM_VERIFY(D.Height() == hypre_CSRMatrixNumRows(A->diag),
                  "the matrix D is NOT compatible with the row starts of"
                  " this HypreParMatrix, row_starts must be given.");
   }
   else
   {
      // Validate the given row_starts against D's local height.
      int offset;
      if (assumed_partition)
      {
         offset = 0;
      }
      else
      {
         MPI_Comm_rank(GetComm(), &offset);
      }
      int local_num_rows = row_starts[offset+1]-row_starts[offset];
      MFEM_VERIFY(local_num_rows == D.Height(), "the number of rows in D is "
                  " not compatible with the given row_starts");
   }
   // D.Width() will be checked for compatibility by the SparseMatrix
   // multiplication function, mfem::Mult(), called below.

   int part_size;
   HYPRE_BigInt global_num_rows;
   if (assumed_partition)
   {
      part_size = 2;
      if (row_starts_given)
      {
         global_num_rows = row_starts[2];
         // Here, we use row_starts[2], so row_starts must come from the
         // methods GetDofOffsets/GetTrueDofOffsets of ParFiniteElementSpace
         // (HYPRE's partitions have only 2 entries).
      }
      else
      {
         global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
      }
   }
   else
   {
      MPI_Comm_size(GetComm(), &part_size);
      global_num_rows = row_starts[part_size];
      part_size++;
   }

   HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
   HYPRE_BigInt *col_map_offd;

   // get the diag and offd blocks as SparseMatrix wrappers
   SparseMatrix A_diag, A_offd;
   GetDiag(A_diag);
   GetOffd(A_offd, col_map_offd);

   // Multiply the diag and offd blocks with D -- these products will be the
   // diag and offd blocks of the output HypreParMatrix, DA.
   SparseMatrix* DA_diag = mfem::Mult(D, A_diag);
   SparseMatrix* DA_offd = mfem::Mult(D, A_offd);

   // Copy row_starts, col_starts, and col_map_offd; ownership of these arrays
   // will be given to the newly constructed output HypreParMatrix, DA.
   HYPRE_BigInt *new_row_starts =
      DuplicateAs<HYPRE_BigInt>(row_starts, part_size, false);
   HYPRE_BigInt *new_col_starts =
      (row_starts == col_starts ? new_row_starts :
       DuplicateAs<HYPRE_BigInt>(col_starts, part_size, false));
   HYPRE_BigInt *new_col_map_offd =
      DuplicateAs<HYPRE_BigInt>(col_map_offd, A_offd.Width());

   // Ownership of DA_diag and DA_offd is transferred to the HypreParMatrix
   // constructor.
   const bool own_diag_offd = true;

   // Create the output HypreParMatrix, DA, from DA_diag and DA_offd
   HypreParMatrix* DA =
      new HypreParMatrix(GetComm(),
                         global_num_rows, hypre_ParCSRMatrixGlobalNumCols(A),
                         new_row_starts, new_col_starts,
                         DA_diag, DA_offd, new_col_map_offd,
                         own_diag_offd);

#if MFEM_HYPRE_VERSION <= 22200
   // Give ownership of row_starts, col_starts, and col_map_offd to DA
   hypre_ParCSRMatrixSetRowStartsOwner(DA->A, 1);
   hypre_ParCSRMatrixSetColStartsOwner(DA->A, 1);
#else
   // Newer hypre keeps internal copies of the partitions; free ours.
   mfem_hypre_TFree_host(new_row_starts);
   mfem_hypre_TFree_host(new_col_starts);
#endif
   DA->colMapOwner = 1;

   return DA;
}
2016 
// Scale the i-th row of the matrix (both diag and offd blocks) by diag(i).
// NOTE(review): the signature line (original line 2017) is absent from this
// extract -- confirm against the full source.
{
   if (hypre_CSRMatrixNumRows(A->diag) != hypre_CSRMatrixNumRows(A->offd))
   {
      mfem_error("Row does not match");
   }

   if (hypre_CSRMatrixNumRows(A->diag) != diag.Size())
   {
      mfem_error("Note the Vector diag is not of compatible dimensions with A\n");
   }

   // The scaling is performed on the host.
   HostReadWrite();
   diag.HostRead();

   int size = Height();
   double *Adiag_data = hypre_CSRMatrixData(A->diag);
   HYPRE_Int *Adiag_i = hypre_CSRMatrixI(A->diag);

   double *Aoffd_data = hypre_CSRMatrixData(A->offd);
   HYPRE_Int *Aoffd_i = hypre_CSRMatrixI(A->offd);
   double val;
   HYPRE_Int jj;
   for (int i(0); i < size; ++i)
   {
      val = diag[i];
      // Scale every stored entry of row i in both blocks.
      for (jj = Adiag_i[i]; jj < Adiag_i[i+1]; ++jj)
      {
         Adiag_data[jj] *= val;
      }
      for (jj = Aoffd_i[i]; jj < Aoffd_i[i+1]; ++jj)
      {
         Aoffd_data[jj] *= val;
      }
   }

   HypreRead();
}
2055 
// Scale the i-th row of the matrix (both diag and offd blocks) by
// 1/diag(i); aborts in debug builds if any diag(i) is zero.
// NOTE(review): the signature line (original line 2056) is absent from this
// extract -- confirm against the full source.
{
   if (hypre_CSRMatrixNumRows(A->diag) != hypre_CSRMatrixNumRows(A->offd))
   {
      mfem_error("Row does not match");
   }

   if (hypre_CSRMatrixNumRows(A->diag) != diag.Size())
   {
      mfem_error("Note the Vector diag is not of compatible dimensions with A\n");
   }

   // The scaling is performed on the host.
   HostReadWrite();
   diag.HostRead();

   int size = Height();
   double *Adiag_data = hypre_CSRMatrixData(A->diag);
   HYPRE_Int *Adiag_i = hypre_CSRMatrixI(A->diag);


   double *Aoffd_data = hypre_CSRMatrixData(A->offd);
   HYPRE_Int *Aoffd_i = hypre_CSRMatrixI(A->offd);
   double val;
   HYPRE_Int jj;
   for (int i(0); i < size; ++i)
   {
#ifdef MFEM_DEBUG
      if (0.0 == diag(i))
      {
         mfem_error("HypreParMatrix::InvDiagScale : Division by 0");
      }
#endif
      val = 1./diag(i);
      // Scale every stored entry of row i in both blocks.
      for (jj = Adiag_i[i]; jj < Adiag_i[i+1]; ++jj)
      {
         Adiag_data[jj] *= val;
      }
      for (jj = Aoffd_i[i]; jj < Aoffd_i[i+1]; ++jj)
      {
         Aoffd_data[jj] *= val;
      }
   }

   HypreRead();
}
2101 
// Scale all stored entries of the matrix (diag and offd blocks) by the
// scalar s.
// NOTE(review): the signature line (original line 2102) is absent from this
// extract -- confirm against the full source.
{
   if (hypre_CSRMatrixNumRows(A->diag) != hypre_CSRMatrixNumRows(A->offd))
   {
      mfem_error("Row does not match");
   }

   // The scaling is performed on the host.
   HostReadWrite();

   HYPRE_Int size=hypre_CSRMatrixNumRows(A->diag);
   HYPRE_Int jj;

   double *Adiag_data = hypre_CSRMatrixData(A->diag);
   HYPRE_Int *Adiag_i = hypre_CSRMatrixI(A->diag);
   for (jj = 0; jj < Adiag_i[size]; ++jj)
   {
      Adiag_data[jj] *= s;
   }

   double *Aoffd_data = hypre_CSRMatrixData(A->offd);
   HYPRE_Int *Aoffd_i = hypre_CSRMatrixI(A->offd);
   for (jj = 0; jj < Aoffd_i[size]; ++jj)
   {
      Aoffd_data[jj] *= s;
   }

   HypreRead();
}
2130 
2131 static void get_sorted_rows_cols(const Array<int> &rows_cols,
2132  Array<HYPRE_Int> &hypre_sorted)
2133 {
2134  rows_cols.HostRead();
2135  hypre_sorted.SetSize(rows_cols.Size());
2136  bool sorted = true;
2137  for (int i = 0; i < rows_cols.Size(); i++)
2138  {
2139  hypre_sorted[i] = rows_cols[i];
2140  if (i && rows_cols[i-1] > rows_cols[i]) { sorted = false; }
2141  }
2142  if (!sorted) { hypre_sorted.Sort(); }
2143 }
2144 
/// Remove entries of the matrix with magnitude <= threshold by merging
/// diag+offd into one CSR, deleting small entries, and rebuilding the
/// ParCSR matrix in place (the old A is destroyed and replaced).
void HypreParMatrix::Threshold(double threshold)
{
   int ierr = 0;

   MPI_Comm comm;
   hypre_CSRMatrix * csr_A;
   hypre_CSRMatrix * csr_A_wo_z;
   hypre_ParCSRMatrix * parcsr_A_ptr;
   HYPRE_BigInt * row_starts = NULL; HYPRE_BigInt * col_starts = NULL;
   HYPRE_BigInt row_start = -1; HYPRE_BigInt row_end = -1;
   HYPRE_BigInt col_start = -1; HYPRE_BigInt col_end = -1;

   comm = hypre_ParCSRMatrixComm(A);

   ierr += hypre_ParCSRMatrixGetLocalRange(A,
                                           &row_start,&row_end,
                                           &col_start,&col_end );

   // Keep the partitioning arrays of the old matrix; the new matrix reuses
   // them (and, for old hypre, inherits their ownership flags below).
   row_starts = hypre_ParCSRMatrixRowStarts(A);
   col_starts = hypre_ParCSRMatrixColStarts(A);

#if MFEM_HYPRE_VERSION <= 22200
   bool old_owns_row = hypre_ParCSRMatrixOwnsRowStarts(A);
   bool old_owns_col = hypre_ParCSRMatrixOwnsColStarts(A);
#endif
   HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
   parcsr_A_ptr = hypre_ParCSRMatrixCreate(comm, global_num_rows,
                                           global_num_cols,
                                           row_starts, col_starts,
                                           0, 0, 0);
#if MFEM_HYPRE_VERSION <= 22200
   // Transfer starts ownership to the new matrix so Destroy() below does not
   // free the arrays the new matrix points at.
   hypre_ParCSRMatrixOwnsRowStarts(parcsr_A_ptr) = old_owns_row;
   hypre_ParCSRMatrixOwnsColStarts(parcsr_A_ptr) = old_owns_col;
   hypre_ParCSRMatrixOwnsRowStarts(A) = 0;
   hypre_ParCSRMatrixOwnsColStarts(A) = 0;
#endif

   csr_A = hypre_MergeDiagAndOffd(A);

   // Free A, if owned
   Destroy();
   Init();

   csr_A_wo_z = hypre_CSRMatrixDeleteZeros(csr_A,threshold);

   /* hypre_CSRMatrixDeleteZeros will return a NULL pointer rather than a usable
      CSR matrix if it finds no non-zeros */
   if (csr_A_wo_z == NULL)
   {
      csr_A_wo_z = csr_A;
   }
   else
   {
      ierr += hypre_CSRMatrixDestroy(csr_A);
   }

   /* TODO: GenerateDiagAndOffd() uses an int array of size equal to the number
      of columns in csr_A_wo_z which is the global number of columns in A. This
      does not scale well. */
   ierr += GenerateDiagAndOffd(csr_A_wo_z,parcsr_A_ptr,
                               col_start,col_end);

   ierr += hypre_CSRMatrixDestroy(csr_A_wo_z);

   MFEM_VERIFY(ierr == 0, "");

   A = parcsr_A_ptr;

   hypre_ParCSRMatrixSetNumNonzeros(A);
   /* Make sure that the first entry in each row is the diagonal one. */
#if MFEM_HYPRE_VERSION <= 22200
   if (row_starts == col_starts)
#else
   if ((row_starts[0] == col_starts[0]) &&
       (row_starts[1] == col_starts[1]))
#endif
   {
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   }
   if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); }
   height = GetNumRows();
   width = GetNumCols();
}
2229 
// NOTE(review): signature line (orig. 2230) was dropped in rendering;
// presumably `void HypreParMatrix::DropSmallEntries(double tol)` — drop
// entries that are small relative to the max row norm. Confirm against header.
{
   // Preserve and restore hypre's global error flag so this call does not
   // leak errors raised (and handled) internally.
   HYPRE_Int old_err = hypre_error_flag;
   hypre_error_flag = 0;

#if MFEM_HYPRE_VERSION < 21400

   // Old hypre has no drop routine: emulate it via Threshold() with an
   // absolute threshold tol * (global max l2 row norm).
   double threshold = 0.0;
   if (tol > 0.0)
   {
      HYPRE_Int *diag_I = A->diag->i, *offd_I = A->offd->i;
      double *diag_d = A->diag->data, *offd_d = A->offd->data;
      HYPRE_Int local_num_rows = A->diag->num_rows;
      double max_l2_row_norm = 0.0;
      Vector row;
      for (HYPRE_Int r = 0; r < local_num_rows; r++)
      {
         // Row norm combines the diag and offd pieces of the row.
         row.SetDataAndSize(diag_d + diag_I[r], diag_I[r+1]-diag_I[r]);
         double l2_row_norm = row.Norml2();
         row.SetDataAndSize(offd_d + offd_I[r], offd_I[r+1]-offd_I[r]);
         l2_row_norm = std::hypot(l2_row_norm, row.Norml2());
         max_l2_row_norm = std::max(max_l2_row_norm, l2_row_norm);
      }
      double loc_max_l2_row_norm = max_l2_row_norm;
      MPI_Allreduce(&loc_max_l2_row_norm, &max_l2_row_norm, 1, MPI_DOUBLE,
                    MPI_MAX, A->comm);
      threshold = tol * max_l2_row_norm;
   }

   Threshold(threshold);

#elif MFEM_HYPRE_VERSION < 21800

   HYPRE_Int err_flag = hypre_ParCSRMatrixDropSmallEntries(A, tol);
   MFEM_VERIFY(!err_flag, "error encountered: error code = " << err_flag);

#else

   // Type 2 selects the row-norm-relative dropping strategy in newer hypre.
   HYPRE_Int err_flag = hypre_ParCSRMatrixDropSmallEntries(A, tol, 2);
   MFEM_VERIFY(!err_flag, "error encountered: error code = " << err_flag);

#endif

   hypre_error_flag = old_err;
}
2275 
// NOTE(review): the first line of this signature (orig. 2276) was dropped in
// rendering; presumably `void HypreParMatrix::EliminateRowsCols(const
// Array<int> &rows_cols,` — eliminate the listed rows/cols from A and fold the
// known values x into the rhs b. Confirm against header.
                                     const HypreParVector &x,
                                     HypreParVector &b)
{
   // hypre expects a sorted HYPRE_Int index list.
   Array<HYPRE_Int> rc_sorted;
   get_sorted_rows_cols(rows_cols, rc_sorted);

   internal::hypre_ParCSRMatrixEliminateAXB(
      A, rc_sorted.Size(), rc_sorted.GetData(), x, b);
}
2286 
// NOTE(review): signature line (orig. 2287) was dropped in rendering;
// presumably `HypreParMatrix* HypreParMatrix::EliminateRowsCols(const
// Array<int> &rows_cols)` — eliminate rows/cols in place and return the
// eliminated part Ae (caller owns it). Confirm against header.
{
   Array<HYPRE_Int> rc_sorted;
   get_sorted_rows_cols(rows_cols, rc_sorted);

   hypre_ParCSRMatrix* Ae;
   // Elimination is done on host data; move back to hypre space afterwards.
   HostReadWrite();
   internal::hypre_ParCSRMatrixEliminateAAe(
      A, &Ae, rc_sorted.Size(), rc_sorted.GetData());
   HypreRead();

   // 'true' — the returned wrapper owns the hypre matrix Ae.
   return new HypreParMatrix(Ae, true);
}
2300 
// NOTE(review): signature line (orig. 2301) was dropped in rendering;
// presumably `HypreParMatrix* HypreParMatrix::EliminateCols(const Array<int>
// &cols)` — eliminate only columns (last arg 1 selects column mode) and
// return the eliminated part. Confirm against header.
{
   Array<HYPRE_Int> rc_sorted;
   get_sorted_rows_cols(cols, rc_sorted);

   hypre_ParCSRMatrix* Ae;
   HostReadWrite();
   // Trailing argument 1 requests column-only elimination.
   internal::hypre_ParCSRMatrixEliminateAAe(
      A, &Ae, rc_sorted.Size(), rc_sorted.GetData(), 1);
   HypreRead();

   return new HypreParMatrix(Ae, true);
}
2314 
// NOTE(review): signature line (orig. 2315) was dropped in rendering;
// presumably `void HypreParMatrix::EliminateRows(const Array<int> &rows)` —
// zero out the listed local rows in place. Confirm against header.
{
   // Skip all work (including host<->device transfers) when nothing to do.
   if (rows.Size() > 0)
   {
      Array<HYPRE_Int> r_sorted;
      get_sorted_rows_cols(rows, r_sorted);
      HostReadWrite();
      internal::hypre_ParCSRMatrixEliminateRows(A, r_sorted.Size(),
                                                r_sorted.GetData());
      HypreRead();
   }
}
2327 
// NOTE(review): the first line of this signature (orig. 2328) was dropped in
// rendering; presumably `void HypreParMatrix::EliminateBC(const HypreParMatrix
// &Ae,` — apply eliminated-BC correction: b -= Ae*x, then set b(r) =
// diag(r)*x(r) on the essential dofs. Confirm against header.
                                 const Array<int> &ess_dof_list,
                                 const Vector &x, Vector &b) const
{
   // b -= Ae*x
   Ae.Mult(-1.0, x, 1.0, b);

   // All operations below are local, so we can skip them if ess_dof_list is
   // empty on this processor to avoid potential host <--> device transfers.
   if (ess_dof_list.Size() == 0) { return; }

   HostRead();
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   double *data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *I = hypre_CSRMatrixI(A_diag);
#ifdef MFEM_DEBUG
   HYPRE_Int *J = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *I_offd = hypre_CSRMatrixI(A_offd);
   double *data_offd = hypre_CSRMatrixData(A_offd);
#endif

   ess_dof_list.HostRead();
   x.HostRead();
   b.HostReadWrite();

   for (int i = 0; i < ess_dof_list.Size(); i++)
   {
      int r = ess_dof_list[i];
      // data[I[r]] is the diagonal entry — relies on it being first in row r
      // (verified below in debug builds).
      b(r) = data[I[r]] * x(r);
#ifdef MFEM_DEBUG
      MFEM_ASSERT(I[r] < I[r+1], "empty row found!");
      // Check that in the rows specified by the ess_dof_list, the matrix A has
      // only one entry -- the diagonal.
      // if (I[r+1] != I[r]+1 || J[I[r]] != r || I_offd[r] != I_offd[r+1])
      if (J[I[r]] != r)
      {
         MFEM_ABORT("the diagonal entry must be the first entry in the row!");
      }
      for (int j = I[r]+1; j < I[r+1]; j++)
      {
         if (data[j] != 0.0)
         {
            MFEM_ABORT("all off-diagonal entries must be zero!");
         }
      }
      for (int j = I_offd[r]; j < I_offd[r+1]; j++)
      {
         if (data_offd[j] != 0.0)
         {
            MFEM_ABORT("all off-diagonal entries must be zero!");
         }
      }
#endif
   }
   HypreRead();
}
2385 
// NOTE(review): the first line of this signature (orig. 2386) was dropped in
// rendering; presumably `void HypreParMatrix::EliminateBC(const Array<int>
// &ess_dofs,` — symmetrically eliminate the essential rows AND columns in
// place (no Ae is produced), setting the diagonal per diag_policy.
// Confirm against header.
                                 DiagonalPolicy diag_policy)
{
   hypre_ParCSRMatrix *A_hypre = *this;
   HypreReadWrite();

   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A_hypre);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A_hypre);

   HYPRE_Int diag_nrows = hypre_CSRMatrixNumRows(diag);
   HYPRE_Int offd_ncols = hypre_CSRMatrixNumCols(offd);

   const int n_ess_dofs = ess_dofs.Size();
   const auto ess_dofs_d = ess_dofs.GetMemory().Read(
                              GetHypreMemoryClass(), n_ess_dofs);

   // Start communication to figure out which columns need to be eliminated in
   // the off-diagonal block
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int *int_buf_data, *eliminate_row, *eliminate_col;
   {
      eliminate_row = mfem_hypre_CTAlloc(HYPRE_Int, diag_nrows);
      eliminate_col = mfem_hypre_CTAlloc(HYPRE_Int, offd_ncols);

      // Make sure A has a communication package
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A_hypre);
      if (!comm_pkg)
      {
         hypre_MatvecCommPkgCreate(A_hypre);
         comm_pkg = hypre_ParCSRMatrixCommPkg(A_hypre);
      }

      // Which of the local rows are to be eliminated?
      MFEM_HYPRE_FORALL(i, diag_nrows, eliminate_row[i] = 0; );
      MFEM_HYPRE_FORALL(i, n_ess_dofs, eliminate_row[ess_dofs_d[i]] = 1; );

      // Use a matvec communication pattern to find (in eliminate_col) which of
      // the local offd columns are to be eliminated

      HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      HYPRE_Int int_buf_sz = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
      int_buf_data = mfem_hypre_CTAlloc(HYPRE_Int, int_buf_sz);

      HYPRE_Int *send_map_elmts;
#if defined(HYPRE_USING_GPU)
      hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);
      send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
#else
      send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
#endif
      // Pack the row-elimination flags of the rows this rank sends.
      MFEM_HYPRE_FORALL(i, int_buf_sz,
      {
         int k = send_map_elmts[i];
         int_buf_data[i] = eliminate_row[k];
      });

#if defined(HYPRE_USING_GPU)
      // Try to use device-aware MPI for the communication if available
      comm_handle = hypre_ParCSRCommHandleCreate_v2(
                       11, comm_pkg, HYPRE_MEMORY_DEVICE, int_buf_data,
                       HYPRE_MEMORY_DEVICE, eliminate_col);
#else
      comm_handle = hypre_ParCSRCommHandleCreate(
                       11, comm_pkg, int_buf_data, eliminate_col );
#endif
   }

   // Eliminate rows and columns in the diagonal block
   {
      const auto I = diag->i;
      const auto J = diag->j;
      auto data = diag->data;

      MFEM_HYPRE_FORALL(i, n_ess_dofs,
      {
         const int idof = ess_dofs_d[i];
         for (int j=I[idof]; j<I[idof+1]; ++j)
         {
            const int jdof = J[j];
            if (jdof == idof)
            {
               if (diag_policy == DiagonalPolicy::DIAG_ONE)
               {
                  data[j] = 1.0;
               }
               else if (diag_policy == DiagonalPolicy::DIAG_ZERO)
               {
                  data[j] = 0.0;
               }
               // else (diag_policy == DiagonalPolicy::DIAG_KEEP)
            }
            else
            {
               // Zero the row entry, and also the symmetric entry (idof-th
               // column entry in row jdof) to eliminate the column as well.
               data[j] = 0.0;
               for (int k=I[jdof]; k<I[jdof+1]; ++k)
               {
                  if (J[k] == idof)
                  {
                     data[k] = 0.0;
                     break;
                  }
               }
            }
         }
      });
   }

   // Eliminate rows in the off-diagonal block
   {
      const auto I = offd->i;
      auto data = offd->data;
      MFEM_HYPRE_FORALL(i, n_ess_dofs,
      {
         const int idof = ess_dofs_d[i];
         for (int j=I[idof]; j<I[idof+1]; ++j)
         {
            data[j] = 0.0;
         }
      });
   }

   // Wait for MPI communication to finish
   hypre_ParCSRCommHandleDestroy(comm_handle);
   mfem_hypre_TFree(int_buf_data);
   mfem_hypre_TFree(eliminate_row);

   // Eliminate columns in the off-diagonal block
   {
      const int nrows_offd = hypre_CSRMatrixNumRows(offd);
      const auto I = offd->i;
      const auto J = offd->j;
      auto data = offd->data;
      MFEM_HYPRE_FORALL(i, nrows_offd,
      {
         for (int j=I[i]; j<I[i+1]; ++j)
         {
            // Branch-free zeroing: multiplies by 0 when the column is flagged.
            data[j] *= 1 - eliminate_col[J[j]];
         }
      });
   }

   mfem_hypre_TFree(eliminate_col);
}
2529 
/// Print the matrix in hypre's IJ format to files named fname.<rank>, with
/// row/column indices shifted by offi/offj (e.g. 1 for 1-based output).
void HypreParMatrix::Print(const char *fname, HYPRE_Int offi,
                           HYPRE_Int offj) const
{
   // hypre's printer reads host data; restore hypre's space afterwards.
   HostRead();
   hypre_ParCSRMatrixPrintIJ(A,offi,offj,fname);
   HypreRead();
}
2537 
/// Read a matrix written by Print() (hypre IJ format, one file per rank),
/// replacing the current contents of this object.
void HypreParMatrix::Read(MPI_Comm comm, const char *fname)
{
   // Release any matrix currently held before overwriting A.
   Destroy();
   Init();

   HYPRE_Int base_i, base_j;
   // base_i/base_j receive the index bases stored in the file; they are not
   // needed further since hypre converts to 0-based internally.
   hypre_ParCSRMatrixReadIJ(comm, fname, &base_i, &base_j, &A);
   hypre_ParCSRMatrixSetNumNonzeros(A);

   if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); }

   // Keep the mfem::Operator dimensions in sync with the new hypre matrix.
   height = GetNumRows();
   width = GetNumCols();
}
2552 
/// Read a matrix stored via the HYPRE_IJMatrix interface and take over its
/// underlying ParCSR object, replacing the current contents of this object.
void HypreParMatrix::Read_IJMatrix(MPI_Comm comm, const char *fname)
{
   Destroy();
   Init();

   HYPRE_IJMatrix A_ij;
   HYPRE_IJMatrixRead(fname, comm, 5555, &A_ij); // HYPRE_PARCSR = 5555

   HYPRE_ParCSRMatrix A_parcsr;
   HYPRE_IJMatrixGetObject(A_ij, (void**) &A_parcsr);

   A = (hypre_ParCSRMatrix*)A_parcsr;

   // NOTE(review): the IJ wrapper A_ij is not destroyed here; only its parcsr
   // object is kept — confirm the wrapper's ownership semantics (possible
   // leak of the IJ shell) against the HYPRE_IJMatrix documentation.
   hypre_ParCSRMatrixSetNumNonzeros(A);

   if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); }

   height = GetNumRows();
   width = GetNumCols();
}
2573 
/// Print a per-rank summary of the matrix's hypre communication package.
/// Ranks print in order, serialized by a token (one char) passed rank-to-rank
/// over MPI so the output is not interleaved.
void HypreParMatrix::PrintCommPkg(std::ostream &os) const
{
   hypre_ParCSRCommPkg *comm_pkg = A->comm_pkg;
   MPI_Comm comm = A->comm;
   char c = '\0';
   const int tag = 46801;
   int myid, nproc;
   MPI_Comm_rank(comm, &myid);
   MPI_Comm_size(comm, &nproc);

   if (myid != 0)
   {
      // Wait for the previous rank to finish printing.
      MPI_Recv(&c, 1, MPI_CHAR, myid-1, tag, comm, MPI_STATUS_IGNORE);
   }
   else
   {
      os << "\nHypreParMatrix: hypre_ParCSRCommPkg:\n";
   }
   // Byte counts assume double-precision matvec buffers.
   os << "Rank " << myid << ":\n"
      "   number of sends  = " << comm_pkg->num_sends <<
      " (" << sizeof(double)*comm_pkg->send_map_starts[comm_pkg->num_sends] <<
      " bytes)\n"
      "   number of recvs  = " << comm_pkg->num_recvs <<
      " (" << sizeof(double)*comm_pkg->recv_vec_starts[comm_pkg->num_recvs] <<
      " bytes)\n";
   if (myid != nproc-1)
   {
      // Flush before handing the token to the next rank.
      os << std::flush;
      MPI_Send(&c, 1, MPI_CHAR, myid+1, tag, comm);
   }
   else
   {
      os << std::endl;
   }
   MPI_Barrier(comm);
}
2610 
/// Print sizes and cumulative hashes of the matrix's arrays (diag, offd,
/// col_map_offd) — useful for comparing matrices across runs/builds. Note that
/// 'hf' accumulates across all appended arrays; hashes are cumulative, not
/// per-array.
void HypreParMatrix::PrintHash(std::ostream &os) const
{
   HashFunction hf;

   os << "global number of rows    : " << A->global_num_rows << '\n'
      << "global number of columns : " << A->global_num_cols << '\n'
      << "first row index : " << A->first_row_index << '\n'
      << " last row index : " << A->last_row_index << '\n'
      << "first col diag  : " << A->first_col_diag << '\n'
      << " last col diag  : " << A->last_col_diag << '\n'
      << "number of nonzeros : " << A->num_nonzeros << '\n';
   // diagonal, off-diagonal
   hypre_CSRMatrix *csr = A->diag;
   const char *csr_name = "diag";
   // Two passes: first over A->diag, then over A->offd (reassigned at the
   // bottom of the loop body).
   for (int m = 0; m < 2; m++)
   {
      auto csr_nnz = csr->i[csr->num_rows];
      os << csr_name << " num rows : " << csr->num_rows << '\n'
         << csr_name << " num cols : " << csr->num_cols << '\n'
         << csr_name << " num nnz  : " << csr->num_nonzeros << '\n'
         << csr_name << " i last   : " << csr_nnz
         // Sanity check: the last row pointer should equal the recorded nnz.
         << (csr_nnz == csr->num_nonzeros ?
             " [good]" : " [** BAD **]") << '\n';
      hf.AppendInts(csr->i, csr->num_rows + 1);
      os << csr_name << " i hash   : " << hf.GetHash() << '\n';
      os << csr_name << " j hash   : ";
      if (csr->j == nullptr)
      {
         os << "(null)\n";
      }
      else
      {
         hf.AppendInts(csr->j, csr_nnz);
         os << hf.GetHash() << '\n';
      }
#if MFEM_HYPRE_VERSION >= 21600
      os << csr_name << " big j hash : ";
      if (csr->big_j == nullptr)
      {
         os << "(null)\n";
      }
      else
      {
         hf.AppendInts(csr->big_j, csr_nnz);
         os << hf.GetHash() << '\n';
      }
#endif
      os << csr_name << " data hash : ";
      if (csr->data == nullptr)
      {
         os << "(null)\n";
      }
      else
      {
         hf.AppendDoubles(csr->data, csr_nnz);
         os << hf.GetHash() << '\n';
      }

      csr = A->offd;
      csr_name = "offd";
   }

   hf.AppendInts(A->col_map_offd, A->offd->num_cols);
   os << "col map offd hash : " << hf.GetHash() << '\n';
}
2676 
2677 inline void delete_hypre_ParCSRMatrixColMapOffd(hypre_ParCSRMatrix *A)
2678 {
2679  HYPRE_BigInt *A_col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
2680  int size = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
2681  Memory<HYPRE_BigInt>(A_col_map_offd, size, true).Delete();
2682 }
2683 
/// Release all resources owned by this wrapper: auxiliary vectors, the
/// diag/offd CSR arrays (depending on the ownership flags), the column map,
/// and finally the hypre matrix itself when ParCSROwner is set.
void HypreParMatrix::Destroy()
{
   if ( X != NULL ) { delete X; }
   if ( Y != NULL ) { delete Y; }
   auxX.Delete();
   auxY.Delete();

   if (A == NULL) { return; }

#ifdef HYPRE_USING_GPU
   if (ParCSROwner && (diagOwner < 0 || offdOwner < 0))
   {
      // Put the "host" or "hypre" pointers in {i,j,data} of A->{diag,offd}, so
      // that they can be destroyed by hypre when hypre_ParCSRMatrixDestroy(A)
      // is called below.

      // Check that if both diagOwner and offdOwner are negative then they have
      // the same value.
      MFEM_VERIFY(!(diagOwner < 0 && offdOwner < 0) || diagOwner == offdOwner,
                  "invalid state");

      MemoryClass mc = (diagOwner == -1 || offdOwner == -1) ?
      // NOTE(review): the rendered listing dropped orig. line 2706 here — the
      // ternary's result values (presumably
      // `GetHypreMemoryClass() : MemoryClass::HOST;`). Confirm against the
      // repository source before relying on this listing.
      Write(mc, diagOwner < 0, offdOwner <0);
   }
#endif

   // Delete the MFEM-side Memory objects; if an owner flag is set, also null
   // the corresponding hypre pointers so hypre does not double-free them.
   mem_diag.I.Delete();
   mem_diag.J.Delete();
   mem_diag.data.Delete();
   if (diagOwner >= 0)
   {
      hypre_CSRMatrixI(A->diag) = NULL;
      hypre_CSRMatrixJ(A->diag) = NULL;
      hypre_CSRMatrixData(A->diag) = NULL;
   }
   mem_offd.I.Delete();
   mem_offd.J.Delete();
   mem_offd.data.Delete();
   if (offdOwner >= 0)
   {
      hypre_CSRMatrixI(A->offd) = NULL;
      hypre_CSRMatrixJ(A->offd) = NULL;
      hypre_CSRMatrixData(A->offd) = NULL;
   }
   if (colMapOwner >= 0)
   {
      if (colMapOwner & 1)
      {
         // NOTE(review): the rendered listing dropped orig. line 2733 here —
         // presumably `delete_hypre_ParCSRMatrixColMapOffd(A);`. Confirm
         // against the repository source.
      }
      hypre_ParCSRMatrixColMapOffd(A) = NULL;
   }

   if (ParCSROwner)
   {
      hypre_ParCSRMatrixDestroy(A);
   }
}
2743 
// NOTE(review): signature line (orig. 2744) was dropped in rendering;
// presumably `void HypreStealOwnership(HypreParMatrix &A_hyp, SparseMatrix
// &A_diag)` — make A_hyp own its diag arrays by swapping ownership with
// A_diag when A_hyp currently only borrows them. Confirm against header.
{
#ifndef HYPRE_BIGINT
   // I/J swapping is only type-compatible when HYPRE_Int == int.
   bool own_i = A_hyp.GetDiagMemoryI().OwnsHostPtr();
   bool own_j = A_hyp.GetDiagMemoryJ().OwnsHostPtr();
   MFEM_CONTRACT_VAR(own_j);
   MFEM_ASSERT(own_i == own_j, "Inconsistent ownership");
   if (!own_i)
   {
      std::swap(A_diag.GetMemoryI(), A_hyp.GetDiagMemoryI());
      std::swap(A_diag.GetMemoryJ(), A_hyp.GetDiagMemoryJ());
   }
#endif
   if (!A_hyp.GetDiagMemoryData().OwnsHostPtr())
   {
      std::swap(A_diag.GetMemoryData(), A_hyp.GetDiagMemoryData());
   }
   // 3 == own both I/J and data of the diag block after the swaps above.
   A_hyp.SetOwnerFlags(3, A_hyp.OwnsOffd(), A_hyp.OwnsColMap());
}
2763 
2764 #if MFEM_HYPRE_VERSION >= 21800
2765 
// NOTE(review): the first line of this signature (orig. 2766) was dropped in
// rendering; presumably `void BlockInverseScale(const HypreParMatrix *A,
// HypreParMatrix *C,` — scale A (into C) and/or the rhs b (into d) by the
// inverse of A's block diagonal with the given blocksize.
// Two condition lines (orig. 2771 and 2780) were also dropped; each
// presumably reads `job == BlockInverseScaleJob::MATRIX_AND_RHS)`. Confirm
// against the repository source.
                       const Vector *b, HypreParVector *d,
                       int blocksize, BlockInverseScaleJob job)
{
   if (job == BlockInverseScaleJob::MATRIX_ONLY ||
   {
      hypre_ParCSRMatrix *C_hypre;
      hypre_ParcsrBdiagInvScal(*A, blocksize, &C_hypre);
      // Drop numerically-zero fill produced by the block inversion.
      hypre_ParCSRMatrixDropSmallEntries(C_hypre, 1e-15, 1);
      C->WrapHypreParCSRMatrix(C_hypre);
   }

   if (job == BlockInverseScaleJob::RHS_ONLY ||
   {
      // View b as a hypre vector compatible with A's row partitioning.
      HypreParVector b_Hypre(A->GetComm(),
                             A->GetGlobalNumRows(),
                             b->GetData(), A->GetRowStarts());
      hypre_ParVector *d_hypre;
      hypre_ParvecBdiagInvScal(b_Hypre, blocksize, &d_hypre, *A);

      // 'true' — d takes ownership of the new hypre vector.
      d->WrapHypreParVector(d_hypre, true);
   }
}
2791 
2792 #endif
2793 
2794 #if MFEM_HYPRE_VERSION < 21400
2795 
// NOTE(review): the first line of this signature (orig. 2796) was dropped in
// rendering; presumably `HypreParMatrix *Add(double alpha, const
// HypreParMatrix &A,` — pre-2.14 hypre path: returns alpha*A + beta*B.
// Confirm against header.
                    double beta, const HypreParMatrix &B)
{
   // hypre's old Add only produces the combined sparsity pattern ...
   hypre_ParCSRMatrix *C_hypre =
      internal::hypre_ParCSRMatrixAdd(const_cast<HypreParMatrix &>(A),
                                      const_cast<HypreParMatrix &>(B));
   MFEM_VERIFY(C_hypre, "error in hypre_ParCSRMatrixAdd");

   if (!hypre_ParCSRMatrixCommPkg(C_hypre)) { hypre_MatvecCommPkgCreate(C_hypre); }
   HypreParMatrix *C = new HypreParMatrix(C_hypre);
   // ... so zero it out and accumulate the scaled terms explicitly.
   *C = 0.0;
   C->Add(alpha, A);
   C->Add(beta, B);

   return C;
}
2812 
// NOTE(review): signature line (orig. 2813) was dropped in rendering;
// presumably `HypreParMatrix * ParAdd(const HypreParMatrix *A, const
// HypreParMatrix *B)` — pre-2.14 hypre path: returns A + B (unit
// coefficients). Confirm against header.
{
   hypre_ParCSRMatrix * C = internal::hypre_ParCSRMatrixAdd(*A,*B);

   if (!hypre_ParCSRMatrixCommPkg(C)) { hypre_MatvecCommPkgCreate(C); }

   return new HypreParMatrix(C);
}
2821 
2822 #else
2823 
/// Return a new HypreParMatrix equal to alpha*A + beta*B (hypre >= 2.14
/// path, delegating the sum entirely to hypre).
HypreParMatrix *Add(double alpha, const HypreParMatrix &A,
                    double beta, const HypreParMatrix &B)
{
   hypre_ParCSRMatrix *C;
#if MFEM_HYPRE_VERSION <= 22000
   // The routine was renamed in hypre between 2.20 and 2.21.
   hypre_ParcsrAdd(alpha, A, beta, B, &C);
#else
   hypre_ParCSRMatrixAdd(alpha, A, beta, B, &C);
#endif
   if (!hypre_ParCSRMatrixCommPkg(C)) { hypre_MatvecCommPkgCreate(C); }

   return new HypreParMatrix(C);
}
2837 
/// Return a new HypreParMatrix equal to A + B (hypre >= 2.14 path).
HypreParMatrix * ParAdd(const HypreParMatrix *A, const HypreParMatrix *B)
{
   hypre_ParCSRMatrix *C;
#if MFEM_HYPRE_VERSION <= 22000
   // The routine was renamed in hypre between 2.20 and 2.21.
   hypre_ParcsrAdd(1.0, *A, 1.0, *B, &C);
#else
   hypre_ParCSRMatrixAdd(1.0, *A, 1.0, *B, &C);
#endif
   if (!hypre_ParCSRMatrixCommPkg(C)) { hypre_MatvecCommPkgCreate(C); }

   return new HypreParMatrix(C);
}
2850 
2851 #endif
2852 
// NOTE(review): the first line of this signature (orig. 2853) was dropped in
// rendering; presumably `HypreParMatrix * ParMult(const HypreParMatrix *A,
// const HypreParMatrix *B,` — returns the product A*B. Confirm against
// header.
                     bool own_matrix)
{
   hypre_ParCSRMatrix * ab;
#ifdef HYPRE_USING_GPU
   // GPU builds use hypre's device-capable SpGEMM path.
   ab = hypre_ParCSRMatMat(*A, *B);
#else
   ab = hypre_ParMatmul(*A,*B);
#endif
   hypre_ParCSRMatrixSetNumNonzeros(ab);

   if (!hypre_ParCSRMatrixCommPkg(ab)) { hypre_MatvecCommPkgCreate(ab); }
   HypreParMatrix *C = new HypreParMatrix(ab);
   if (own_matrix)
   {
      // Give C its own copies of the row/col partitionings so it does not
      // depend on A's and B's lifetimes.
      C->CopyRowStarts();
      C->CopyColStarts();
   }
   return C;
}
2873 
// NOTE(review): signature line (orig. 2874) was dropped in rendering;
// presumably `HypreParMatrix * RAP(const HypreParMatrix *A, const
// HypreParMatrix *P)` — returns the triple product P^t A P. Confirm against
// header.
{
   hypre_ParCSRMatrix * rap;

#ifdef HYPRE_USING_GPU
   // FIXME: this way of computing Pt A P can completely eliminate zero rows
   //        from the sparsity pattern of the product which prevents
   //        EliminateZeroRows() from working correctly. This issue is observed
   //        in ex28p.
   //        Quick fix: add a diagonal matrix with 0 diagonal.
   //        Maybe use hypre_CSRMatrixCheckDiagFirst to see if we need the fix.
   {
      // Compute in two steps: Q = A*P, then rap = P^t * Q.
      hypre_ParCSRMatrix *Q = hypre_ParCSRMatMat(*A,*P);
      const bool keepTranspose = false;
      rap = hypre_ParCSRTMatMatKT(*P,Q,keepTranspose);
      hypre_ParCSRMatrixDestroy(Q);

      // alternative:
      //    hypre_ParCSRMatrixRAPKT
   }
#else
#if MFEM_HYPRE_VERSION <= 22200
   HYPRE_Int P_owns_its_col_starts =
      hypre_ParCSRMatrixOwnsColStarts((hypre_ParCSRMatrix*)(*P));
#endif

   hypre_BoomerAMGBuildCoarseOperator(*P,*A,*P,&rap);

#if MFEM_HYPRE_VERSION <= 22200
   /* Warning: hypre_BoomerAMGBuildCoarseOperator steals the col_starts
      from P (even if it does not own them)! */
   hypre_ParCSRMatrixSetRowStartsOwner(rap,0);
   hypre_ParCSRMatrixSetColStartsOwner(rap,0);
   if (P_owns_its_col_starts)
   {
      // Restore the ownership flag that BoomerAMG cleared on P.
      hypre_ParCSRMatrixSetColStartsOwner(*P, 1);
   }
#endif
#endif

   hypre_ParCSRMatrixSetNumNonzeros(rap);
   // hypre_MatvecCommPkgCreate(rap);

   return new HypreParMatrix(rap);
}
2919 
// NOTE(review): the first line of this signature (orig. 2920) was dropped in
// rendering; presumably `HypreParMatrix * RAP(const HypreParMatrix *Rt, const
// HypreParMatrix *A,` — returns the triple product Rt^t A P. Confirm against
// header.
                     const HypreParMatrix *P)
{
   hypre_ParCSRMatrix * rap;

#ifdef HYPRE_USING_GPU
   {
      // Compute in two steps: Q = A*P, then rap = Rt^t * Q.
      hypre_ParCSRMatrix *Q = hypre_ParCSRMatMat(*A,*P);
      rap = hypre_ParCSRTMatMat(*Rt,Q);
      hypre_ParCSRMatrixDestroy(Q);
   }
#else
#if MFEM_HYPRE_VERSION <= 22200
   HYPRE_Int P_owns_its_col_starts =
      hypre_ParCSRMatrixOwnsColStarts((hypre_ParCSRMatrix*)(*P));
   HYPRE_Int Rt_owns_its_col_starts =
      hypre_ParCSRMatrixOwnsColStarts((hypre_ParCSRMatrix*)(*Rt));
#endif

   hypre_BoomerAMGBuildCoarseOperator(*Rt,*A,*P,&rap);

#if MFEM_HYPRE_VERSION <= 22200
   /* Warning: hypre_BoomerAMGBuildCoarseOperator steals the col_starts
      from Rt and P (even if they do not own them)! */
   hypre_ParCSRMatrixSetRowStartsOwner(rap,0);
   hypre_ParCSRMatrixSetColStartsOwner(rap,0);
   if (P_owns_its_col_starts)
   {
      // Restore the ownership flags that BoomerAMG cleared.
      hypre_ParCSRMatrixSetColStartsOwner(*P, 1);
   }
   if (Rt_owns_its_col_starts)
   {
      hypre_ParCSRMatrixSetColStartsOwner(*Rt, 1);
   }
#endif
#endif

   hypre_ParCSRMatrixSetNumNonzeros(rap);
   // hypre_MatvecCommPkgCreate(rap);

   return new HypreParMatrix(rap);
}
2962 
2963 // Helper function for HypreParMatrixFromBlocks. Note that scalability to
2964 // extremely large processor counts is limited by the use of MPI_Allgather.
// Helper function for HypreParMatrixFromBlocks. Note that scalability to
// extremely large processor counts is limited by the use of MPI_Allgather.
//
// Gathers, for one dimension (rows or columns), the per-rank local sizes and
// per-block sizes, and computes: per-block per-rank global offsets
// (blockProcOffsets), per-rank global offsets (procOffsets), per-rank
// within-rank block offsets (procBlockOffsets), this rank's first global
// index (firstLocal) and the global total (globalNum).
void GatherBlockOffsetData(MPI_Comm comm, const int rank, const int nprocs,
                           const int num_loc, const Array<int> &offsets,
                           std::vector<int> &all_num_loc, const int numBlocks,
                           std::vector<std::vector<HYPRE_BigInt>> &blockProcOffsets,
                           std::vector<HYPRE_BigInt> &procOffsets,
                           std::vector<std::vector<int>> &procBlockOffsets,
                           HYPRE_BigInt &firstLocal, HYPRE_BigInt &globalNum)
{
   std::vector<std::vector<int>> all_block_num_loc(numBlocks);

   // Every rank learns every rank's total local size.
   MPI_Allgather(&num_loc, 1, MPI_INT, all_num_loc.data(), 1, MPI_INT, comm);

   for (int j = 0; j < numBlocks; ++j)
   {
      all_block_num_loc[j].resize(nprocs);
      blockProcOffsets[j].resize(nprocs);

      // Local size of block j on this rank, gathered from all ranks.
      const int blockNumRows = offsets[j + 1] - offsets[j];
      MPI_Allgather(&blockNumRows, 1, MPI_INT, all_block_num_loc[j].data(), 1,
                    MPI_INT, comm);
      // Prefix-sum over ranks: offset of rank i's part of block j.
      blockProcOffsets[j][0] = 0;
      for (int i = 0; i < nprocs - 1; ++i)
      {
         blockProcOffsets[j][i + 1] = blockProcOffsets[j][i]
                                      + all_block_num_loc[j][i];
      }
   }

   firstLocal = 0;
   globalNum = 0;
   procOffsets[0] = 0;
   for (int i = 0; i < nprocs; ++i)
   {
      globalNum += all_num_loc[i];
      // Guard against HYPRE_BigInt overflow when summing sizes.
      MFEM_VERIFY(globalNum >= 0, "overflow in global size");
      if (i < rank)
      {
         firstLocal += all_num_loc[i];
      }

      if (i < nprocs - 1)
      {
         procOffsets[i + 1] = procOffsets[i] + all_num_loc[i];
      }

      // Within rank i, offset of each block relative to the rank's start.
      procBlockOffsets[i].resize(numBlocks);
      procBlockOffsets[i][0] = 0;
      for (int j = 1; j < numBlocks; ++j)
      {
         procBlockOffsets[i][j] = procBlockOffsets[i][j - 1]
                                  + all_block_num_loc[j - 1][i];
      }
   }
}
3019 
// NOTE(review): the first line of this signature (orig. 3020) was dropped in
// rendering; presumably `HypreParMatrix * HypreParMatrixFromBlocks(
// Array2D<HypreParMatrix*> &blocks,` — assemble a single HypreParMatrix from
// a 2D array of blocks (NULL entries = zero blocks), each optionally scaled
// by blockCoeff(i,j). Confirm against header.
                                          Array2D<double> *blockCoeff)
{
   const int numBlockRows = blocks.NumRows();
   const int numBlockCols = blocks.NumCols();

   MFEM_VERIFY(numBlockRows > 0 &&
               numBlockCols > 0, "Invalid input to HypreParMatrixFromBlocks");

   if (blockCoeff != NULL)
   {
      MFEM_VERIFY(numBlockRows == blockCoeff->NumRows() &&
                  numBlockCols == blockCoeff->NumCols(),
                  "Invalid input to HypreParMatrixFromBlocks");
   }

   Array<int> rowOffsets(numBlockRows+1);
   Array<int> colOffsets(numBlockCols+1);

   // Find any non-null block in row 0, just to get the MPI communicator.
   int nonNullBlockRow0 = -1;
   for (int j=0; j<numBlockCols; ++j)
   {
      if (blocks(0,j) != NULL)
      {
         nonNullBlockRow0 = j;
         break;
      }
   }

   MFEM_VERIFY(nonNullBlockRow0 >= 0, "Null row of blocks");
   MPI_Comm comm = blocks(0,nonNullBlockRow0)->GetComm();

   // Set offsets based on the number of rows or columns in each block.
   rowOffsets = 0;
   colOffsets = 0;
   for (int i=0; i<numBlockRows; ++i)
   {
      for (int j=0; j<numBlockCols; ++j)
      {
         if (blocks(i,j) != NULL)
         {
            const int nrows = blocks(i,j)->NumRows();
            const int ncols = blocks(i,j)->NumCols();

            // First block seen in row i / column j fixes the size; any later
            // block must agree with it.
            if (rowOffsets[i+1] == 0)
            {
               rowOffsets[i+1] = nrows;
            }
            else
            {
               MFEM_VERIFY(rowOffsets[i+1] == nrows,
                           "Inconsistent blocks in HypreParMatrixFromBlocks");
            }

            if (colOffsets[j+1] == 0)
            {
               colOffsets[j+1] = ncols;
            }
            else
            {
               MFEM_VERIFY(colOffsets[j+1] == ncols,
                           "Inconsistent blocks in HypreParMatrixFromBlocks");
            }
         }
      }
      // Turn the per-row sizes into a running prefix sum.
      rowOffsets[i+1] += rowOffsets[i];
   }

   for (int j=0; j<numBlockCols; ++j)
   {
      colOffsets[j+1] += colOffsets[j];
   }

   const int num_loc_rows = rowOffsets[numBlockRows];
   const int num_loc_cols = colOffsets[numBlockCols];

   int nprocs, rank;
   MPI_Comm_rank(comm, &rank);
   MPI_Comm_size(comm, &nprocs);

   std::vector<int> all_num_loc_rows(nprocs);
   std::vector<int> all_num_loc_cols(nprocs);
   std::vector<HYPRE_BigInt> procRowOffsets(nprocs);
   std::vector<HYPRE_BigInt> procColOffsets(nprocs);
   std::vector<std::vector<HYPRE_BigInt>> blockRowProcOffsets(numBlockRows);
   std::vector<std::vector<HYPRE_BigInt>> blockColProcOffsets(numBlockCols);
   std::vector<std::vector<int>> procBlockRowOffsets(nprocs);
   std::vector<std::vector<int>> procBlockColOffsets(nprocs);

   HYPRE_BigInt first_loc_row, glob_nrows, first_loc_col, glob_ncols;
   // Gather global offset data for rows and for columns separately.
   GatherBlockOffsetData(comm, rank, nprocs, num_loc_rows, rowOffsets,
                         all_num_loc_rows, numBlockRows, blockRowProcOffsets,
                         procRowOffsets, procBlockRowOffsets, first_loc_row,
                         glob_nrows);

   GatherBlockOffsetData(comm, rank, nprocs, num_loc_cols, colOffsets,
                         all_num_loc_cols, numBlockCols, blockColProcOffsets,
                         procColOffsets, procBlockColOffsets, first_loc_col,
                         glob_ncols);

   std::vector<int> opI(num_loc_rows + 1);
   std::vector<int> cnt(num_loc_rows);

   for (int i = 0; i < num_loc_rows; ++i)
   {
      opI[i] = 0;
      cnt[i] = 0;
   }

   opI[num_loc_rows] = 0;

   Array2D<hypre_CSRMatrix *> csr_blocks(numBlockRows, numBlockCols);

   // Loop over all blocks, to determine nnz for each row.
   for (int i = 0; i < numBlockRows; ++i)
   {
      for (int j = 0; j < numBlockCols; ++j)
      {
         if (blocks(i, j) == NULL)
         {
            csr_blocks(i, j) = NULL;
         }
         else
         {
            // Merge each block's diag+offd into one host CSR matrix.
            blocks(i, j)->HostRead();
            csr_blocks(i, j) = hypre_MergeDiagAndOffd(*blocks(i, j));
            blocks(i, j)->HypreRead();

            for (int k = 0; k < csr_blocks(i, j)->num_rows; ++k)
            {
               opI[rowOffsets[i] + k + 1] +=
                  csr_blocks(i, j)->i[k + 1] - csr_blocks(i, j)->i[k];
            }
         }
      }
   }

   // Now opI[i] is nnz for row i-1. Do a partial sum to get offsets.
   for (int i = 0; i < num_loc_rows; ++i)
   {
      opI[i + 1] += opI[i];
   }

   const int nnz = opI[num_loc_rows];

   std::vector<HYPRE_BigInt> opJ(nnz);
   std::vector<double> data(nnz);

   // Loop over all blocks, to set matrix data.
   for (int i = 0; i < numBlockRows; ++i)
   {
      for (int j = 0; j < numBlockCols; ++j)
      {
         if (csr_blocks(i, j) != NULL)
         {
            const int nrows = csr_blocks(i, j)->num_rows;
            const double cij = blockCoeff ? (*blockCoeff)(i, j) : 1.0;
#if MFEM_HYPRE_VERSION >= 21600
            // Merged matrices may store global columns in big_j.
            const bool usingBigJ = (csr_blocks(i, j)->big_j != NULL);
#endif

            for (int k = 0; k < nrows; ++k)
            {
               const int rowg = rowOffsets[i] + k; // process-local row
               const int nnz_k = csr_blocks(i,j)->i[k+1]-csr_blocks(i,j)->i[k];
               const int osk = csr_blocks(i, j)->i[k];

               for (int l = 0; l < nnz_k; ++l)
               {
                  // Find the column process offset for the block.
#if MFEM_HYPRE_VERSION >= 21600
                  const HYPRE_Int bcol = usingBigJ ?
                                         csr_blocks(i, j)->big_j[osk + l] :
                                         csr_blocks(i, j)->j[osk + l];
#else
                  const HYPRE_Int bcol = csr_blocks(i, j)->j[osk + l];
#endif

                  // find the processor 'bcolproc' that holds column 'bcol':
                  const auto &offs = blockColProcOffsets[j];
                  const int bcolproc =
                     std::upper_bound(offs.begin() + 1, offs.end(), bcol)
                     - offs.begin() - 1;

                  // Map block-local global column to the monolithic global
                  // column: rank offset + block offset within the rank +
                  // position within the block on that rank.
                  opJ[opI[rowg] + cnt[rowg]] = procColOffsets[bcolproc] +
                                               procBlockColOffsets[bcolproc][j]
                                               + bcol
                                               - blockColProcOffsets[j][bcolproc];
                  data[opI[rowg] + cnt[rowg]] = cij * csr_blocks(i, j)->data[osk + l];
                  cnt[rowg]++;
               }
            }
         }
      }
   }

   // Free the temporary merged CSR blocks.
   for (int i = 0; i < numBlockRows; ++i)
   {
      for (int j = 0; j < numBlockCols; ++j)
      {
         if (csr_blocks(i, j) != NULL)
         {
            hypre_CSRMatrixDestroy(csr_blocks(i, j));
         }
      }
   }

   MFEM_VERIFY(HYPRE_AssumedPartitionCheck(),
               "only 'assumed partition' mode is supported");

   std::vector<HYPRE_BigInt> rowStarts2(2);
   rowStarts2[0] = first_loc_row;
   rowStarts2[1] = first_loc_row + all_num_loc_rows[rank];

   // Square if every rank has equally many local rows and columns.
   int square = std::equal(all_num_loc_rows.begin(), all_num_loc_rows.end(),
                           all_num_loc_cols.begin());
   if (square)
   {
      return new HypreParMatrix(comm, num_loc_rows, glob_nrows, glob_ncols,
                                opI.data(), opJ.data(),
                                data.data(),
                                rowStarts2.data(),
                                rowStarts2.data());
   }
   else
   {
      std::vector<HYPRE_BigInt> colStarts2(2);
      colStarts2[0] = first_loc_col;
      colStarts2[1] = first_loc_col + all_num_loc_cols[rank];

      return new HypreParMatrix(comm, num_loc_rows, glob_nrows, glob_ncols,
                                opI.data(), opJ.data(),
                                data.data(),
                                rowStarts2.data(),
                                colStarts2.data());
   }
}
3257 
// Free-function convenience wrapper: eliminate the essential dofs listed in
// ess_dof_list from the system A X = B, using Ae (the eliminated part of the
// matrix) to correct the right-hand side B for the known values in X.
void EliminateBC(const HypreParMatrix &A, const HypreParMatrix &Ae,
                 const Array<int> &ess_dof_list,
                 const Vector &X, Vector &B)
{
   // Delegates entirely to the member implementation on A.
   A.EliminateBC(Ae, ess_dof_list, X, B);
}
3264 
3265 // Taubin or "lambda-mu" scheme, which alternates between positive and
3266 // negative step sizes to approximate low-pass filter effect.
3267 
3268 int ParCSRRelax_Taubin(hypre_ParCSRMatrix *A, // matrix to relax with
3269  hypre_ParVector *f, // right-hand side
3270  double lambda,
3271  double mu,
3272  int N,
3273  double max_eig,
3274  hypre_ParVector *u, // initial/updated approximation
3275  hypre_ParVector *r // another temp vector
3276  )
3277 {
3278  hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
3279  HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
3280 
3281  double *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
3282  double *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r));
3283 
3284  for (int i = 0; i < N; i++)
3285  {
3286  // get residual: r = f - A*u
3287  hypre_ParVectorCopy(f, r);
3288  hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);
3289 
3290  double coef;
3291  (0 == (i % 2)) ? coef = lambda : coef = mu;
3292 
3293  for (HYPRE_Int j = 0; j < num_rows; j++)
3294  {
3295  u_data[j] += coef*r_data[j] / max_eig;
3296  }
3297  }
3298 
3299  return 0;
3300 }
3301 
3302 // FIR scheme, which uses Chebyshev polynomials and a window function
3303 // to approximate a low-pass step filter.
3304 
// Apply one pass of the FIR (finite impulse response) polynomial smoother:
// u is replaced by a fir_coeffs-weighted combination of Chebyshev terms of
// the residual-correction operator, built with a three-term recurrence.
// x0..x3 are caller-provided temporaries; only locally owned rows are
// updated in the elementwise loops. Returns 0.
int ParCSRRelax_FIR(hypre_ParCSRMatrix *A, // matrix to relax with
                    hypre_ParVector *f,    // right-hand side
                    double max_eig,
                    int poly_order,
                    double* fir_coeffs,
                    hypre_ParVector *u,    // initial/updated approximation
                    hypre_ParVector *x0,   // temporaries
                    hypre_ParVector *x1,
                    hypre_ParVector *x2,
                    hypre_ParVector *x3)

{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);

   double *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));

   double *x0_data = hypre_VectorData(hypre_ParVectorLocalVector(x0));
   double *x1_data = hypre_VectorData(hypre_ParVectorLocalVector(x1));
   double *x2_data = hypre_VectorData(hypre_ParVectorLocalVector(x2));
   double *x3_data = hypre_VectorData(hypre_ParVectorLocalVector(x3));

   // x0 = previous recurrence term, x1 = current term, x3 = accumulated
   // fir_coeffs-weighted result.
   hypre_ParVectorCopy(u, x0);

   // x1 = f -A*x0/max_eig
   hypre_ParVectorCopy(f, x1);
   hypre_ParCSRMatrixMatvec(-1.0, A, x0, 1.0, x1);

   for (HYPRE_Int i = 0; i < num_rows; i++)
   {
      x1_data[i] /= -max_eig;
   }

   // x1 = x0 -x1
   for (HYPRE_Int i = 0; i < num_rows; i++)
   {
      x1_data[i] = x0_data[i] -x1_data[i];
   }

   // x3 = f0*x0 +f1*x1  (initialize the accumulator with the first two terms)
   for (HYPRE_Int i = 0; i < num_rows; i++)
   {
      x3_data[i] = fir_coeffs[0]*x0_data[i] +fir_coeffs[1]*x1_data[i];
   }

   // Higher-order terms via a three-term recurrence. The statement order in
   // the fused loop below is essential: x2 must be fully formed before the
   // x0/x1 shuffle.
   for (int n = 2; n <= poly_order; n++)
   {
      // x2 = f - A*x1/max_eig
      hypre_ParVectorCopy(f, x2);
      hypre_ParCSRMatrixMatvec(-1.0, A, x1, 1.0, x2);

      for (HYPRE_Int i = 0; i < num_rows; i++)
      {
         x2_data[i] /= -max_eig;
      }

      // x2 = (x1-x0) +(x1-2*x2)
      // x3 = x3 +f[n]*x2
      // x0 = x1
      // x1 = x2

      for (HYPRE_Int i = 0; i < num_rows; i++)
      {
         x2_data[i] = (x1_data[i]-x0_data[i]) +(x1_data[i]-2*x2_data[i]);
         x3_data[i] += fir_coeffs[n]*x2_data[i];
         x0_data[i] = x1_data[i];
         x1_data[i] = x2_data[i];
      }
   }

   // The filtered result becomes the new approximation.
   for (HYPRE_Int i = 0; i < num_rows; i++)
   {
      u_data[i] = x3_data[i];
   }

   return 0;
}
3382 
{
   // NOTE(review): the constructor signature line was lost in extraction;
   // presumably the default HypreSmoother constructor. Initializes every
   // smoother parameter and pointer member to a safe default.
   type = default_type;
   relax_times = 1;
   relax_weight = 1.0;
   omega = 1.0;
   poly_order = 2;
   poly_fraction = .3;
   lambda = 0.5;      // Taubin positive step (see ParCSRRelax_Taubin)
   mu = -0.5;         // Taubin negative step
   taubin_iter = 40;

   l1_norms = NULL;
   pos_l1_norms = false;
   eig_est_cg_iter = 10;
   B = X = V = Z = NULL;
   auxB.Reset(); auxX.Reset();
   X0 = X1 = NULL;
   fir_coeffs = NULL;
   A_is_symmetric = false;
}
3404 
                           int relax_times_, double relax_weight_,
                           double omega_, int poly_order_,
                           double poly_fraction_, int eig_est_cg_iter_)
{
   // NOTE(review): the first line of this constructor's signature was lost
   // in extraction; it evidently also takes the operator A_ and a smoother
   // type type_ (both are used below).
   type = type_;
   relax_times = relax_times_;
   relax_weight = relax_weight_;
   omega = omega_;
   poly_order = poly_order_;
   poly_fraction = poly_fraction_;
   eig_est_cg_iter = eig_est_cg_iter_;

   l1_norms = NULL;
   pos_l1_norms = false;
   B = X = V = Z = NULL;
   auxB.Reset(); auxX.Reset();
   X0 = X1 = NULL;
   fir_coeffs = NULL;
   A_is_symmetric = false;

   // Bind to the operator immediately (allocates type-dependent data).
   SetOperator(A_);
}
3428 
3429 void HypreSmoother::SetType(HypreSmoother::Type type_, int relax_times_)
3430 {
3431  type = static_cast<int>(type_);
3432  relax_times = relax_times_;
3433 }
3434 
3435 void HypreSmoother::SetSOROptions(double relax_weight_, double omega_)
3436 {
3437  relax_weight = relax_weight_;
3438  omega = omega_;
3439 }
3440 
3441 void HypreSmoother::SetPolyOptions(int poly_order_, double poly_fraction_,
3442  int eig_est_cg_iter_)
3443 {
3444  poly_order = poly_order_;
3445  poly_fraction = poly_fraction_;
3446  eig_est_cg_iter = eig_est_cg_iter_;
3447 }
3448 
3449 void HypreSmoother::SetTaubinOptions(double lambda_, double mu_,
3450  int taubin_iter_)
3451 {
3452  lambda = lambda_;
3453  mu = mu_;
3454  taubin_iter = taubin_iter_;
3455 }
3456 
3457 void HypreSmoother::SetWindowByName(const char* name)
3458 {
3459  double a = -1, b, c;
3460  if (!strcmp(name,"Rectangular")) { a = 1.0, b = 0.0, c = 0.0; }
3461  if (!strcmp(name,"Hanning")) { a = 0.5, b = 0.5, c = 0.0; }
3462  if (!strcmp(name,"Hamming")) { a = 0.54, b = 0.46, c = 0.0; }
3463  if (!strcmp(name,"Blackman")) { a = 0.42, b = 0.50, c = 0.08; }
3464  if (a < 0)
3465  {
3466  mfem_error("HypreSmoother::SetWindowByName : name not recognized!");
3467  }
3468 
3469  SetWindowParameters(a, b, c);
3470 }
3471 
3472 void HypreSmoother::SetWindowParameters(double a, double b, double c)
3473 {
3474  window_params[0] = a;
3475  window_params[1] = b;
3476  window_params[2] = c;
3477 }
3478 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // void HypreSmoother::SetOperator(const Operator &op). Binds the smoother
   // to a HypreParMatrix and (re)allocates all type-dependent data.
   A = const_cast<HypreParMatrix *>(dynamic_cast<const HypreParMatrix *>(&op));
   if (A == NULL)
   {
      mfem_error("HypreSmoother::SetOperator : not HypreParMatrix!");
   }

   height = A->Height();
   width = A->Width();

   // Drop everything computed for the previous operator.
   auxX.Delete(); auxB.Delete();
   if (B) { delete B; }
   if (X) { delete X; }
   if (V) { delete V; }
   if (Z) { delete Z; }
   if (l1_norms)
   {
      mfem_hypre_TFree(l1_norms);
   }
   delete X0;
   delete X1;

   X1 = X0 = Z = V = B = X = NULL;
   auxB.Reset(); auxX.Reset();

   // Types 1-4: hypre-computed l1 norms; type 5: lumped row sums A*1.
   if (type >= 1 && type <= 4)
   {
      hypre_ParCSRComputeL1Norms(*A, type, NULL, &l1_norms);
      // The above call will set the hypre_error_flag when it encounters zero
      // rows in A.
   }
   else if (type == 5)
   {
      l1_norms = mfem_hypre_CTAlloc(double, height);
      Vector ones(height), diag(l1_norms, height);
      ones = 1.0;
      A->Mult(ones, diag);
   }
   else
   {
      l1_norms = NULL;
   }
   // Optionally force the stored norms to be positive.
   if (l1_norms && pos_l1_norms)
   {
#if defined(HYPRE_USING_GPU)
      double *d_l1_norms = l1_norms; // avoid *this capture
      MFEM_GPU_FORALL(i, height,
      {
         d_l1_norms[i] = std::abs(d_l1_norms[i]);
      });
#else
      for (int i = 0; i < height; i++)
      {
         l1_norms[i] = std::abs(l1_norms[i]);
      }
#endif
   }

   // Type 16 (Chebyshev): estimate the largest eigenvalue and allocate Z.
   if (type == 16)
   {
      poly_scale = 1;
      if (eig_est_cg_iter > 0)
      {
         // NOTE(review): the closing argument line of this call was lost in
         // extraction (the eigenvalue-estimate output arguments).
         hypre_ParCSRMaxEigEstimateCG(*A, poly_scale, eig_est_cg_iter,
      }
      else
      {
#if MFEM_HYPRE_VERSION <= 22200
         min_eig_est = 0;
         hypre_ParCSRMaxEigEstimate(*A, poly_scale, &max_eig_est);
#else
         hypre_ParCSRMaxEigEstimate(*A, poly_scale, &max_eig_est, &min_eig_est);
#endif
      }
      Z = new HypreParVector(*A);
   }
   // Types 1001/1002: Taubin and FIR polynomial smoothers.
   else if (type == 1001 || type == 1002)
   {
      poly_scale = 0;
      if (eig_est_cg_iter > 0)
      {
         // NOTE(review): the closing argument line of this call was lost in
         // extraction (the eigenvalue-estimate output arguments).
         hypre_ParCSRMaxEigEstimateCG(*A, poly_scale, eig_est_cg_iter,
      }
      else
      {
#if MFEM_HYPRE_VERSION <= 22200
         min_eig_est = 0;
         hypre_ParCSRMaxEigEstimate(*A, poly_scale, &max_eig_est);
#else
         hypre_ParCSRMaxEigEstimate(*A, poly_scale, &max_eig_est, &min_eig_est);
#endif
      }

      // The Taubin and FIR polynomials are defined on [0, 2]
      max_eig_est /= 2;

      // Compute window function, Chebyshev coefficients, and allocate temps.
      if (type == 1002)
      {
         // Temporaries for Chebyshev recursive evaluation
         Z = new HypreParVector(*A);
         X0 = new HypreParVector(*A);
         X1 = new HypreParVector(*A);

         // NOTE(review): a statement line was lost here in extraction
         // (presumably the call computing the FIR coefficients).
      }
   }
}
3590 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // HypreSmoother::SetFIRCoefficients(double max_eig). Builds the FIR
   // smoother coefficients as the product of a cosine window and the
   // Chebyshev coefficients of an ideal low-pass step filter.
   if (fir_coeffs)
   {
      delete [] fir_coeffs;
   }

   fir_coeffs = new double[poly_order+1];

   double* window_coeffs = new double[poly_order+1];
   double* cheby_coeffs = new double[poly_order+1];

   // Window function: a + b*cos(t) + c*cos(2t), t in [0, pi).
   double a = window_params[0];
   double b = window_params[1];
   double c = window_params[2];
   for (int i = 0; i <= poly_order; i++)
   {
      double t = (i*M_PI)/(poly_order+1);
      window_coeffs[i] = a + b*cos(t) +c*cos(2*t);
   }

   // Chebyshev coefficients of the pass-band indicator; the pass band is
   // determined by poly_fraction and max_eig.
   double k_pb = poly_fraction*max_eig;
   double theta_pb = acos(1.0 -0.5*k_pb);
   double sigma = 0.0;
   cheby_coeffs[0] = (theta_pb +sigma)/M_PI;
   for (int i = 1; i <= poly_order; i++)
   {
      double t = i*(theta_pb+sigma);
      cheby_coeffs[i] = 2.0*sin(t)/(i*M_PI);
   }

   // Final FIR coefficients: windowed Chebyshev coefficients.
   for (int i = 0; i <= poly_order; i++)
   {
      fir_coeffs[i] = window_coeffs[i]*cheby_coeffs[i];
   }

   delete[] window_coeffs;
   delete[] cheby_coeffs;
}
3630 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // void HypreSmoother::Mult(const HypreParVector &b, HypreParVector &x)
   // const. Applies relax_times sweeps of the selected smoother to x.
   if (A == NULL)
   {
      mfem_error("HypreSmoother::Mult (...) : HypreParMatrix A is missing");
      return;
   }

   // TODO: figure out where each function needs A, b, and x ...

   b.HypreRead();
   if (!iterative_mode)
   {
      x.HypreWrite();
      // Fast path: plain single-sweep Jacobi is a single diagonal scaling.
      if (type == 0 && relax_times == 1)
      {
         // Note: hypre_ParCSRDiagScale() is not exposed in older versions
         HYPRE_ParCSRDiagScale(NULL, *A, b, x);
         if (relax_weight != 1.0)
         {
            hypre_ParVectorScale(relax_weight, x);
         }
         return;
      }
      hypre_ParVectorSetConstantValues(x, 0.0);
   }
   else
   {
      x.HypreReadWrite();
   }

   // Lazily allocate the work vector V.
   if (V == NULL)
   {
      V = new HypreParVector(*A);
   }

   if (type == 1001)
   {
      for (int sweep = 0; sweep < relax_times; sweep++)
      {
         // NOTE(review): the first line(s) of this call were lost in
         // extraction (presumably ParCSRRelax_Taubin with the lambda/mu
         // parameters).
            max_eig_est,
            x, *V);
      }
   }
   else if (type == 1002)
   {
      for (int sweep = 0; sweep < relax_times; sweep++)
      {
         ParCSRRelax_FIR(*A, b,
                         max_eig_est,
                         poly_order,
                         fir_coeffs,
                         x,
                         *X0, *X1, *V, *Z);
      }
   }
   else
   {
      int hypre_type = type;
      // hypre doesn't have lumped Jacobi, so treat the action as l1-Jacobi
      if (type == 5) { hypre_type = 1; }

      if (Z == NULL)
      {
         // NOTE(review): the middle argument lines of this call were lost
         // in extraction.
         hypre_ParCSRRelax(*A, b, hypre_type,
            x, *V, NULL);
      }
      else
      {
         // NOTE(review): the middle argument lines of this call were lost
         // in extraction.
         hypre_ParCSRRelax(*A, b, hypre_type,
            x, *V, *Z);
      }
   }
}
3710 
// Generic Vector interface: wrap b and x as hypre vectors (shallow when the
// memory types are compatible, otherwise through deep-copied aux buffers)
// and forward to the HypreParVector overload of Mult().
void HypreSmoother::Mult(const Vector &b, Vector &x) const
{
   MFEM_ASSERT(b.Size() == NumCols(), "");
   MFEM_ASSERT(x.Size() == NumRows(), "");

   if (A == NULL)
   {
      mfem_error("HypreSmoother::Mult (...) : HypreParMatrix A is missing");
      return;
   }

   // Lazily create the wrapper vectors (no data of their own).
   if (B == NULL)
   {
      B = new HypreParVector(A->GetComm(),
                             A -> GetGlobalNumRows(),
                             nullptr,
                             A -> GetRowStarts());
      X = new HypreParVector(A->GetComm(),
                             A -> GetGlobalNumCols(),
                             nullptr,
                             A -> GetColStarts());
   }

   const bool bshallow = CanShallowCopy(b.GetMemory(), GetHypreMemoryClass());
   const bool xshallow = CanShallowCopy(x.GetMemory(), GetHypreMemoryClass());

   if (bshallow)
   {
      B->WrapMemoryRead(b.GetMemory());
   }
   else
   {
      if (auxB.Empty()) { auxB.New(NumCols(), GetHypreMemoryType()); }
      auxB.CopyFrom(b.GetMemory(), auxB.Capacity()); // Deep copy
      B->WrapMemoryRead(auxB);
   }

   if (xshallow)
   {
      // NOTE(review): the matching 'if' line was lost in extraction
      // (presumably an iterative_mode read-write wrap of x).
      else { X->WrapMemoryWrite(x.GetMemory()); }
   }
   else
   {
      if (auxX.Empty()) { auxX.New(NumRows(), GetHypreMemoryType()); }
      if (iterative_mode)
      {
         auxX.CopyFrom(x.GetMemory(), x.Size()); // Deep copy
         // NOTE(review): a wrap call was lost here in extraction.
      }
      else
      {
         // NOTE(review): a wrap call was lost here in extraction.
      }
   }

   Mult(*B, *X);

   if (!xshallow) { x = *X; } // Deep copy
}
3771 
{
   // NOTE(review): the signature line was lost in extraction; the error
   // string below identifies this as HypreSmoother::MultTranspose.
   // Symmetric smoother types (and symmetric A) can reuse Mult(); the
   // transpose action is otherwise not implemented.
   if (A_is_symmetric || type == 0 || type == 1 || type == 5)
   {
      Mult(b, x);
      return;
   }
   mfem_error("HypreSmoother::MultTranspose (...) : undefined!\n");
}
3781 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // the HypreSmoother destructor. Releases all auxiliary vectors, the
   // l1-norm storage, and the FIR coefficients.
   auxX.Delete(); auxB.Delete();
   if (B) { delete B; }
   if (X) { delete X; }
   if (V) { delete V; }
   if (Z) { delete Z; }
   if (l1_norms)
   {
      mfem_hypre_TFree(l1_norms);
   }
   if (fir_coeffs)
   {
      delete [] fir_coeffs;
   }
   if (X0) { delete X0; }
   if (X1) { delete X1; }
}
3800 
3801 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // the default HypreSolver constructor. Starts with no operator and no
   // wrapper vectors; setup is deferred until first use.
   A = NULL;
   setup_called = 0;
   B = X = NULL;
   auxB.Reset();
   auxX.Reset();
   // NOTE(review): one more initialization line was lost here in extraction
   // (presumably the error_mode default).
}
3811 
   : Solver(A_->Height(), A_->Width())
{
   // NOTE(review): the first signature line was lost in extraction; this
   // constructor takes a HypreParMatrix pointer A_ and sizes the Solver
   // base from it.
   A = A_;
   setup_called = 0;
   B = X = NULL;
   auxB.Reset();
   auxX.Reset();
   // NOTE(review): one more initialization line was lost here in extraction
   // (presumably the error_mode default).
}
3822 
// Wrap b and x in the cached hypre vectors B and X: shallow wraps when the
// memory types are compatible with hypre's, deep copies through auxB/auxX
// otherwise. Returns true iff x was wrapped shallowly (so results land in x
// directly and no copy-back is needed by the caller).
bool HypreSolver::WrapVectors(const Vector &b, Vector &x) const
{
   MFEM_ASSERT(b.Size() == NumCols(), "");
   MFEM_ASSERT(x.Size() == NumRows(), "");

   MFEM_VERIFY(A != NULL, "HypreParMatrix A is missing");

   // Lazily create the wrapper vectors (no data of their own).
   if (B == NULL)
   {
      B = new HypreParVector(A->GetComm(), A->GetGlobalNumRows(),
                             nullptr, A->GetRowStarts());
      X = new HypreParVector(A->GetComm(), A->GetGlobalNumCols(),
                             nullptr, A->GetColStarts());
   }

   const bool bshallow = CanShallowCopy(b.GetMemory(), GetHypreMemoryClass());
   const bool xshallow = CanShallowCopy(x.GetMemory(), GetHypreMemoryClass());

   if (bshallow)
   {
      B->WrapMemoryRead(b.GetMemory());
   }
   else
   {
      if (auxB.Empty()) { auxB.New(NumCols(), GetHypreMemoryType()); }
      auxB.CopyFrom(b.GetMemory(), auxB.Capacity()); // Deep copy
      B->WrapMemoryRead(auxB);
   }

   if (xshallow)
   {
      // NOTE(review): the matching 'if' line was lost in extraction
      // (presumably an iterative_mode read-write wrap of x).
      else { X->WrapMemoryWrite(x.GetMemory()); }
   }
   else
   {
      if (auxX.Empty()) { auxX.New(NumRows(), GetHypreMemoryType()); }
      if (iterative_mode)
      {
         auxX.CopyFrom(x.GetMemory(), x.Size()); // Deep copy
         // NOTE(review): a wrap call was lost here in extraction.
      }
      else
      {
         // NOTE(review): a wrap call was lost here in extraction.
      }
   }

   return xshallow;
}
3873 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // the HypreParVector overload of HypreSolver::Setup. Runs the hypre
   // setup phase exactly once and records it in setup_called.
   if (setup_called) { return; }

   MFEM_VERIFY(A != NULL, "HypreParMatrix A is missing");

   HYPRE_Int err_flag = SetupFcn()(*this, *A, b, x);
   // NOTE(review): the condition line was lost here in extraction
   // (presumably a WARN_HYPRE_ERRORS check of error_mode).
   {
      if (err_flag)
      { MFEM_WARNING("Error during setup! Error code: " << err_flag); }
   }
   else if (error_mode == ABORT_HYPRE_ERRORS)
   {
      MFEM_VERIFY(!err_flag, "Error during setup! Error code: " << err_flag);
   }
   // Clear hypre's sticky error flag so later calls are not poisoned.
   hypre_error_flag = 0;
   setup_called = 1;
}
3893 
3894 void HypreSolver::Setup(const Vector &b, Vector &x) const
3895 {
3896  const bool x_shallow = WrapVectors(b, x);
3897  Setup(*B, *X);
3898  if (!x_shallow) { x = *X; } // Deep copy if shallow copy is impossible
3899 }
3900 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // the HypreParVector overload of HypreSolver::Mult. Ensures setup has
   // run, then invokes the solver's solve function.
   HYPRE_Int err_flag;
   if (A == NULL)
   {
      mfem_error("HypreSolver::Mult (...) : HypreParMatrix A is missing");
      return;
   }

   if (!iterative_mode)
   {
      // Use a zero initial guess when not in iterative mode.
      x.HypreWrite();
      hypre_ParVectorSetConstantValues(x, 0.0);
   }

   b.HypreRead();
   x.HypreReadWrite();

   Setup(b, x);

   err_flag = SolveFcn()(*this, *A, b, x);
   // NOTE(review): the condition line was lost here in extraction
   // (presumably a WARN_HYPRE_ERRORS check of error_mode).
   {
      if (err_flag)
      { MFEM_WARNING("Error during solve! Error code: " << err_flag); }
   }
   else if (error_mode == ABORT_HYPRE_ERRORS)
   {
      MFEM_VERIFY(!err_flag, "Error during solve! Error code: " << err_flag);
   }
   // Clear hypre's sticky error flag so later calls are not poisoned.
   hypre_error_flag = 0;
}
3933 
3934 void HypreSolver::Mult(const Vector &b, Vector &x) const
3935 {
3936  const bool x_shallow = WrapVectors(b, x);
3937  Mult(*B, *X);
3938  if (!x_shallow) { x = *X; } // Deep copy if shallow copy is impossible
3939 }
3940 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // the HypreSolver destructor. Releases the wrapper vectors and aux
   // buffers.
   if (B) { delete B; }
   if (X) { delete X; }
   auxB.Delete();
   auxX.Delete();
}
3948 
3949 
// Create a PCG solver on the given communicator; no preconditioner yet.
HyprePCG::HyprePCG(MPI_Comm comm) : precond(NULL)
{
   // By default, treat the incoming x as an initial guess.
   iterative_mode = true;

   HYPRE_ParCSRPCGCreate(comm, &pcg_solver);
}
3956 
// Create a PCG solver bound to the matrix A_, on A_'s communicator.
HyprePCG::HyprePCG(const HypreParMatrix &A_) : HypreSolver(&A_), precond(NULL)
{
   MPI_Comm comm;

   iterative_mode = true;

   // Build the hypre solver on the communicator of the matrix.
   HYPRE_ParCSRMatrixGetComm(*A, &comm);

   HYPRE_ParCSRPCGCreate(comm, &pcg_solver);
}
3967 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // void HyprePCG::SetOperator(const Operator &op). Rebinds the solver to
   // a new HypreParMatrix and invalidates cached setup/wrappers.
   const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
   MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");

   // update base classes: Operator, Solver, HypreSolver
   height = new_A->Height();
   width = new_A->Width();
   A = const_cast<HypreParMatrix *>(new_A);
   if (precond)
   {
      // Re-attach the preconditioner so it tracks the new operator.
      precond->SetOperator(*A);
      this->SetPreconditioner(*precond);
   }
   // Force a fresh hypre setup phase and drop stale wrapper vectors.
   setup_called = 0;
   delete X;
   delete B;
   B = X = NULL;
   auxB.Delete(); auxB.Reset();
   auxX.Delete(); auxX.Reset();
}
3989 
// Forward the (relative) convergence tolerance to the hypre PCG solver.
void HyprePCG::SetTol(double tol)
{
   HYPRE_PCGSetTol(pcg_solver, tol);
}
3994 
// Forward the absolute convergence tolerance to the hypre PCG solver.
void HyprePCG::SetAbsTol(double atol)
{
   HYPRE_PCGSetAbsoluteTol(pcg_solver, atol);
}
3999 
// Forward the iteration limit to the hypre PCG solver.
void HyprePCG::SetMaxIter(int max_iter)
{
   HYPRE_PCGSetMaxIter(pcg_solver, max_iter);
}
4004 
// Forward the logging level to the hypre PCG solver.
void HyprePCG::SetLogging(int logging)
{
   HYPRE_PCGSetLogging(pcg_solver, logging);
}
4009 
// Forward the print level to the hypre ParCSR PCG solver.
void HyprePCG::SetPrintLevel(int print_lvl)
{
   HYPRE_ParCSRPCGSetPrintLevel(pcg_solver, print_lvl);
}
4014 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // void HyprePCG::SetPreconditioner(HypreSolver &precond_). Remembers the
   // preconditioner and registers its solve/setup callbacks with hypre.
   precond = &precond_;

   HYPRE_ParCSRPCGSetPrecond(pcg_solver,
                             precond_.SolveFcn(),
                             precond_.SetupFcn(),
                             precond_);
}
4024 
4025 void HyprePCG::SetResidualConvergenceOptions(int res_frequency, double rtol)
4026 {
4027  HYPRE_PCGSetTwoNorm(pcg_solver, 1);
4028  if (res_frequency > 0)
4029  {
4030  HYPRE_PCGSetRecomputeResidualP(pcg_solver, res_frequency);
4031  }
4032  if (rtol > 0.0)
4033  {
4034  HYPRE_PCGSetResidualTol(pcg_solver, rtol);
4035  }
4036 }
4037 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // void HyprePCG::Mult(const HypreParVector &b, HypreParVector &x) const.
   // Runs setup (once) and the PCG solve, with optional timing/statistics
   // output controlled by the solver's print level.
   int myid;
   HYPRE_Int time_index = 0;
   HYPRE_Int num_iterations;
   double final_res_norm;
   MPI_Comm comm;
   HYPRE_Int print_level;

   // Temporarily reduce hypre's print level (mod 3) for the solve; the
   // original value is restored at the end of this function.
   HYPRE_PCGGetPrintLevel(pcg_solver, &print_level);
   HYPRE_ParCSRPCGSetPrintLevel(pcg_solver, print_level%3);

   HYPRE_ParCSRMatrixGetComm(*A, &comm);

   if (!iterative_mode)
   {
      // Zero initial guess when not in iterative mode.
      x.HypreWrite();
      hypre_ParVectorSetConstantValues(x, 0.0);
   }

   b.HypreRead();
   x.HypreReadWrite();

   if (!setup_called)
   {
      if (print_level > 0 && print_level < 3)
      {
         time_index = hypre_InitializeTiming("PCG Setup");
         hypre_BeginTiming(time_index);
      }

      HYPRE_ParCSRPCGSetup(pcg_solver, *A, b, x);
      setup_called = 1;

      if (print_level > 0 && print_level < 3)
      {
         hypre_EndTiming(time_index);
         hypre_PrintTiming("Setup phase times", comm);
         hypre_FinalizeTiming(time_index);
         hypre_ClearTiming();
      }
   }

   if (print_level > 0 && print_level < 3)
   {
      time_index = hypre_InitializeTiming("PCG Solve");
      hypre_BeginTiming(time_index);
   }

   HYPRE_ParCSRPCGSolve(pcg_solver, *A, b, x);

   if (print_level > 0)
   {
      if (print_level < 3)
      {
         hypre_EndTiming(time_index);
         hypre_PrintTiming("Solve phase times", comm);
         hypre_FinalizeTiming(time_index);
         hypre_ClearTiming();
      }

      HYPRE_ParCSRPCGGetNumIterations(pcg_solver, &num_iterations);
      HYPRE_ParCSRPCGGetFinalRelativeResidualNorm(pcg_solver,
                                                  &final_res_norm);

      MPI_Comm_rank(comm, &myid);

      // Only rank 0 reports the iteration count and final residual.
      if (myid == 0)
      {
         mfem::out << "PCG Iterations = " << num_iterations << endl
                   << "Final PCG Relative Residual Norm = " << final_res_norm
                   << endl;
      }
   }
   HYPRE_ParCSRPCGSetPrintLevel(pcg_solver, print_level);
}
4114 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // the HyprePCG destructor. Releases the underlying hypre PCG object.
   HYPRE_ParCSRPCGDestroy(pcg_solver);
}
4119 
4120 
// Create a GMRES solver on the given communicator with default options;
// no preconditioner yet.
HypreGMRES::HypreGMRES(MPI_Comm comm) : precond(NULL)
{
   // By default, treat the incoming x as an initial guess.
   iterative_mode = true;

   HYPRE_ParCSRGMRESCreate(comm, &gmres_solver);
   SetDefaultOptions();
}
4128 
   : HypreSolver(&A_), precond(NULL)
{
   // NOTE(review): the constructor's first signature line was lost in
   // extraction; it takes the system matrix A_ and builds the solver on
   // A_'s communicator.
   MPI_Comm comm;

   iterative_mode = true;

   HYPRE_ParCSRMatrixGetComm(*A, &comm);

   HYPRE_ParCSRGMRESCreate(comm, &gmres_solver);
   SetDefaultOptions();
}
4141 
4142 void HypreGMRES::SetDefaultOptions()
4143 {
4144  int k_dim = 50;
4145  int max_iter = 100;
4146  double tol = 1e-6;
4147 
4148  HYPRE_ParCSRGMRESSetKDim(gmres_solver, k_dim);
4149  HYPRE_ParCSRGMRESSetMaxIter(gmres_solver, max_iter);
4150  HYPRE_ParCSRGMRESSetTol(gmres_solver, tol);
4151 }
4152 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // void HypreGMRES::SetOperator(const Operator &op). Rebinds the solver
   // to a new HypreParMatrix and invalidates cached setup/wrappers.
   const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
   MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");

   // update base classes: Operator, Solver, HypreSolver
   height = new_A->Height();
   width = new_A->Width();
   A = const_cast<HypreParMatrix *>(new_A);
   if (precond)
   {
      // Re-attach the preconditioner so it tracks the new operator.
      precond->SetOperator(*A);
      this->SetPreconditioner(*precond);
   }
   // Force a fresh hypre setup phase and drop stale wrapper vectors.
   setup_called = 0;
   delete X;
   delete B;
   B = X = NULL;
   auxB.Delete(); auxB.Reset();
   auxX.Delete(); auxX.Reset();
}
4174 
// Forward the (relative) convergence tolerance to the hypre GMRES solver.
void HypreGMRES::SetTol(double tol)
{
   HYPRE_GMRESSetTol(gmres_solver, tol);
}
4179 
// Forward the absolute convergence tolerance to the hypre GMRES solver.
void HypreGMRES::SetAbsTol(double tol)
{
   HYPRE_GMRESSetAbsoluteTol(gmres_solver, tol);
}
4184 
// Forward the iteration limit to the hypre GMRES solver.
void HypreGMRES::SetMaxIter(int max_iter)
{
   HYPRE_GMRESSetMaxIter(gmres_solver, max_iter);
}
4189 
// Forward the restart (Krylov subspace) dimension to the hypre GMRES solver.
void HypreGMRES::SetKDim(int k_dim)
{
   HYPRE_GMRESSetKDim(gmres_solver, k_dim);
}
4194 
// Forward the logging level to the hypre GMRES solver.
void HypreGMRES::SetLogging(int logging)
{
   HYPRE_GMRESSetLogging(gmres_solver, logging);
}
4199 
// Forward the print level to the hypre GMRES solver.
void HypreGMRES::SetPrintLevel(int print_lvl)
{
   HYPRE_GMRESSetPrintLevel(gmres_solver, print_lvl);
}
4204 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // void HypreGMRES::SetPreconditioner(HypreSolver &precond_). Remembers
   // the preconditioner and registers its callbacks with hypre.
   precond = &precond_;

   HYPRE_ParCSRGMRESSetPrecond(gmres_solver,
                               precond_.SolveFcn(),
                               precond_.SetupFcn(),
                               precond_);
}
4214 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // void HypreGMRES::Mult(const HypreParVector &b, HypreParVector &x)
   // const. Runs setup (once) and the GMRES solve, with optional
   // timing/statistics output controlled by the solver's print level.
   int myid;
   HYPRE_Int time_index = 0;
   HYPRE_Int num_iterations;
   double final_res_norm;
   MPI_Comm comm;
   HYPRE_Int print_level;

   HYPRE_GMRESGetPrintLevel(gmres_solver, &print_level);

   HYPRE_ParCSRMatrixGetComm(*A, &comm);

   if (!iterative_mode)
   {
      // Zero initial guess when not in iterative mode.
      x.HypreWrite();
      hypre_ParVectorSetConstantValues(x, 0.0);
   }

   b.HypreRead();
   x.HypreReadWrite();

   if (!setup_called)
   {
      if (print_level > 0)
      {
         time_index = hypre_InitializeTiming("GMRES Setup");
         hypre_BeginTiming(time_index);
      }

      HYPRE_ParCSRGMRESSetup(gmres_solver, *A, b, x);
      setup_called = 1;

      if (print_level > 0)
      {
         hypre_EndTiming(time_index);
         hypre_PrintTiming("Setup phase times", comm);
         hypre_FinalizeTiming(time_index);
         hypre_ClearTiming();
      }
   }

   if (print_level > 0)
   {
      time_index = hypre_InitializeTiming("GMRES Solve");
      hypre_BeginTiming(time_index);
   }

   HYPRE_ParCSRGMRESSolve(gmres_solver, *A, b, x);

   if (print_level > 0)
   {
      hypre_EndTiming(time_index);
      hypre_PrintTiming("Solve phase times", comm);
      hypre_FinalizeTiming(time_index);
      hypre_ClearTiming();

      HYPRE_ParCSRGMRESGetNumIterations(gmres_solver, &num_iterations);
      HYPRE_ParCSRGMRESGetFinalRelativeResidualNorm(gmres_solver,
                                                    &final_res_norm);

      MPI_Comm_rank(comm, &myid);

      // Only rank 0 reports the iteration count and final residual.
      if (myid == 0)
      {
         mfem::out << "GMRES Iterations = " << num_iterations << endl
                   << "Final GMRES Relative Residual Norm = " << final_res_norm
                   << endl;
      }
   }
}
4286 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // the HypreGMRES destructor. Releases the underlying hypre GMRES object.
   HYPRE_ParCSRGMRESDestroy(gmres_solver);
}
4291 
4292 
// Create a flexible-GMRES solver on the given communicator with default
// options; no preconditioner yet.
HypreFGMRES::HypreFGMRES(MPI_Comm comm) : precond(NULL)
{
   // By default, treat the incoming x as an initial guess.
   iterative_mode = true;

   HYPRE_ParCSRFlexGMRESCreate(comm, &fgmres_solver);
   SetDefaultOptions();
}
4300 
   : HypreSolver(&A_), precond(NULL)
{
   // NOTE(review): the constructor's first signature line was lost in
   // extraction; it takes the system matrix A_ and builds the solver on
   // A_'s communicator.
   MPI_Comm comm;

   iterative_mode = true;

   HYPRE_ParCSRMatrixGetComm(*A, &comm);

   HYPRE_ParCSRFlexGMRESCreate(comm, &fgmres_solver);
   SetDefaultOptions();
}
4313 
4314 void HypreFGMRES::SetDefaultOptions()
4315 {
4316  int k_dim = 50;
4317  int max_iter = 100;
4318  double tol = 1e-6;
4319 
4320  HYPRE_ParCSRFlexGMRESSetKDim(fgmres_solver, k_dim);
4321  HYPRE_ParCSRFlexGMRESSetMaxIter(fgmres_solver, max_iter);
4322  HYPRE_ParCSRFlexGMRESSetTol(fgmres_solver, tol);
4323 }
4324 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // void HypreFGMRES::SetOperator(const Operator &op). Rebinds the solver
   // to a new HypreParMatrix and invalidates cached setup/wrappers.
   const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
   MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");

   // update base classes: Operator, Solver, HypreSolver
   height = new_A->Height();
   width = new_A->Width();
   A = const_cast<HypreParMatrix *>(new_A);
   if (precond)
   {
      // Re-attach the preconditioner so it tracks the new operator.
      precond->SetOperator(*A);
      this->SetPreconditioner(*precond);
   }
   // Force a fresh hypre setup phase and drop stale wrapper vectors.
   setup_called = 0;
   delete X;
   delete B;
   B = X = NULL;
   auxB.Delete(); auxB.Reset();
   auxX.Delete(); auxX.Reset();
}
4346 
// Forward the convergence tolerance to the hypre FlexGMRES solver.
void HypreFGMRES::SetTol(double tol)
{
   HYPRE_ParCSRFlexGMRESSetTol(fgmres_solver, tol);
}
4351 
// Forward the iteration limit to the hypre FlexGMRES solver.
void HypreFGMRES::SetMaxIter(int max_iter)
{
   HYPRE_ParCSRFlexGMRESSetMaxIter(fgmres_solver, max_iter);
}
4356 
// Forward the restart (Krylov subspace) dimension to the hypre FlexGMRES
// solver.
void HypreFGMRES::SetKDim(int k_dim)
{
   HYPRE_ParCSRFlexGMRESSetKDim(fgmres_solver, k_dim);
}
4361 
// Forward the logging level to the hypre FlexGMRES solver.
void HypreFGMRES::SetLogging(int logging)
{
   HYPRE_ParCSRFlexGMRESSetLogging(fgmres_solver, logging);
}
4366 
// Forward the print level to the hypre FlexGMRES solver.
void HypreFGMRES::SetPrintLevel(int print_lvl)
{
   HYPRE_ParCSRFlexGMRESSetPrintLevel(fgmres_solver, print_lvl);
}
4371 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // void HypreFGMRES::SetPreconditioner(HypreSolver &precond_). Remembers
   // the preconditioner and registers its callbacks with hypre.
   precond = &precond_;
   HYPRE_ParCSRFlexGMRESSetPrecond(fgmres_solver,
                                   precond_.SolveFcn(),
                                   precond_.SetupFcn(),
                                   precond_);
}
4380 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // void HypreFGMRES::Mult(const HypreParVector &b, HypreParVector &x)
   // const. Runs setup (once) and the FlexGMRES solve, with optional
   // timing/statistics output controlled by the solver's print level.
   int myid;
   HYPRE_Int time_index = 0;
   HYPRE_Int num_iterations;
   double final_res_norm;
   MPI_Comm comm;
   HYPRE_Int print_level;

   HYPRE_FlexGMRESGetPrintLevel(fgmres_solver, &print_level);

   HYPRE_ParCSRMatrixGetComm(*A, &comm);

   if (!iterative_mode)
   {
      // Zero initial guess when not in iterative mode.
      x.HypreWrite();
      hypre_ParVectorSetConstantValues(x, 0.0);
   }

   b.HypreRead();
   x.HypreReadWrite();

   if (!setup_called)
   {
      if (print_level > 0)
      {
         time_index = hypre_InitializeTiming("FGMRES Setup");
         hypre_BeginTiming(time_index);
      }

      HYPRE_ParCSRFlexGMRESSetup(fgmres_solver, *A, b, x);
      setup_called = 1;

      if (print_level > 0)
      {
         hypre_EndTiming(time_index);
         hypre_PrintTiming("Setup phase times", comm);
         hypre_FinalizeTiming(time_index);
         hypre_ClearTiming();
      }
   }

   if (print_level > 0)
   {
      time_index = hypre_InitializeTiming("FGMRES Solve");
      hypre_BeginTiming(time_index);
   }

   HYPRE_ParCSRFlexGMRESSolve(fgmres_solver, *A, b, x);

   if (print_level > 0)
   {
      hypre_EndTiming(time_index);
      hypre_PrintTiming("Solve phase times", comm);
      hypre_FinalizeTiming(time_index);
      hypre_ClearTiming();

      HYPRE_ParCSRFlexGMRESGetNumIterations(fgmres_solver, &num_iterations);
      HYPRE_ParCSRFlexGMRESGetFinalRelativeResidualNorm(fgmres_solver,
                                                        &final_res_norm);

      MPI_Comm_rank(comm, &myid);

      // Only rank 0 reports the iteration count and final residual.
      if (myid == 0)
      {
         mfem::out << "FGMRES Iterations = " << num_iterations << endl
                   << "Final FGMRES Relative Residual Norm = " << final_res_norm
                   << endl;
      }
   }
}
4452 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // the HypreFGMRES destructor. Releases the underlying hypre FlexGMRES
   // object.
   HYPRE_ParCSRFlexGMRESDestroy(fgmres_solver);
}
4457 
4458 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // a SetOperator(const Operator &op) override (HypreDiagScale, based on
   // position in the file). Rebinds to a new HypreParMatrix and invalidates
   // cached setup/wrappers.
   const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
   MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");

   // update base classes: Operator, Solver, HypreSolver
   height = new_A->Height();
   width = new_A->Width();
   A = const_cast<HypreParMatrix *>(new_A);
   setup_called = 0;
   delete X;
   delete B;
   B = X = NULL;
   auxB.Delete(); auxB.Reset();
   auxX.Delete(); auxX.Reset();
}
4475 
4476 
{
   // NOTE(review): the signature line was lost in extraction; presumably
   // HypreParaSails::HypreParaSails(MPI_Comm comm). Creates the ParaSails
   // preconditioner on the given communicator with default options.
   HYPRE_ParaSailsCreate(comm, &sai_precond);
   SetDefaultOptions();
}
4482 
{
   // NOTE(review): the signature line was lost in extraction; presumably a
   // HypreParaSails constructor taking a HypreParMatrix A. Creates the
   // ParaSails preconditioner on A's communicator with default options.
   MPI_Comm comm;

   HYPRE_ParCSRMatrixGetComm(A, &comm);

   HYPRE_ParaSailsCreate(comm, &sai_precond);
   SetDefaultOptions();
}
4492 
4493 void HypreParaSails::SetDefaultOptions()
4494 {
4495  int sai_max_levels = 1;
4496  double sai_threshold = 0.1;
4497  double sai_filter = 0.1;
4498  int sai_sym = 0;
4499  double sai_loadbal = 0.0;
4500  int sai_reuse = 0;
4501  int sai_logging = 1;
4502 
4503  HYPRE_ParaSailsSetParams(sai_precond, sai_threshold, sai_max_levels);
4504  HYPRE_ParaSailsSetFilter(sai_precond, sai_filter);
4505  HYPRE_ParaSailsSetSym(sai_precond, sai_sym);
4506  HYPRE_ParaSailsSetLoadbal(sai_precond, sai_loadbal);
4507  HYPRE_ParaSailsSetReuse(sai_precond, sai_reuse);
4508  HYPRE_ParaSailsSetLogging(sai_precond, sai_logging);
4509 }
4510 
4511 void HypreParaSails::ResetSAIPrecond(MPI_Comm comm)
4512 {
      // Recreate the hypre ParaSails object on the given communicator while
      // preserving its current configuration: read every option back from the
      // old object, destroy/recreate it, then re-apply the saved options.
4513  HYPRE_Int sai_max_levels;
4514  HYPRE_Real sai_threshold;
4515  HYPRE_Real sai_filter;
4516  HYPRE_Int sai_sym;
4517  HYPRE_Real sai_loadbal;
4518  HYPRE_Int sai_reuse;
4519  HYPRE_Int sai_logging;
4520 
4521  // hypre_ParAMGData *amg_data = (hypre_ParAMGData *)sai_precond;
      // Snapshot the options currently set on sai_precond.
4522  HYPRE_ParaSailsGetNlevels(sai_precond, &sai_max_levels);
4523  HYPRE_ParaSailsGetThresh(sai_precond, &sai_threshold);
4524  HYPRE_ParaSailsGetFilter(sai_precond, &sai_filter);
4525  HYPRE_ParaSailsGetSym(sai_precond, &sai_sym);
4526  HYPRE_ParaSailsGetLoadbal(sai_precond, &sai_loadbal);
4527  HYPRE_ParaSailsGetReuse(sai_precond, &sai_reuse);
4528  HYPRE_ParaSailsGetLogging(sai_precond, &sai_logging);
4529 
      // Replace the solver object; hypre offers no lighter-weight reset.
4530  HYPRE_ParaSailsDestroy(sai_precond);
4531  HYPRE_ParaSailsCreate(comm, &sai_precond);
4532 
      // Restore the saved configuration on the fresh object.
4533  HYPRE_ParaSailsSetParams(sai_precond, sai_threshold, sai_max_levels);
4534  HYPRE_ParaSailsSetFilter(sai_precond, sai_filter);
4535  HYPRE_ParaSailsSetSym(sai_precond, sai_sym);
4536  HYPRE_ParaSailsSetLoadbal(sai_precond, sai_loadbal);
4537  HYPRE_ParaSailsSetReuse(sai_precond, sai_reuse);
4538  HYPRE_ParaSailsSetLogging(sai_precond, sai_logging);
4539 }
4540 
4542 {
4543  const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
4544  MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4545 
4546  if (A)
4547  {
4548  MPI_Comm comm;
4549  HYPRE_ParCSRMatrixGetComm(*A, &comm);
4550  ResetSAIPrecond(comm);
4551  }
4552 
4553  // update base classes: Operator, Solver, HypreSolver
4554  height = new_A->Height();
4555  width = new_A->Width();
4556  A = const_cast<HypreParMatrix *>(new_A);
4557  setup_called = 0;
4558  delete X;
4559  delete B;
4560  B = X = NULL;
4561  auxB.Delete(); auxB.Reset();
4562  auxX.Delete(); auxX.Reset();
4563 }
4564 
4565 void HypreParaSails::SetParams(double threshold, int max_levels)
4566 {
      // Forward the drop threshold and pattern level count to hypre; see the
      // ParaSails documentation for the meaning of these parameters.
4567  HYPRE_ParaSailsSetParams(sai_precond, threshold, max_levels);
4568 }
4569 
4570 void HypreParaSails::SetFilter(double filter)
4571 {
      // Set the post-filtering threshold applied to the computed factor.
4572  HYPRE_ParaSailsSetFilter(sai_precond, filter);
4573 }
4574 
4575 void HypreParaSails::SetLoadBal(double loadbal)
4576 {
      // Set the ParaSails load-balance parameter (0.0 disables balancing).
4577  HYPRE_ParaSailsSetLoadbal(sai_precond, loadbal);
4578 }
4579 
4581 {
4582  HYPRE_ParaSailsSetReuse(sai_precond, reuse);
4583 }
4584 
4586 {
4587  HYPRE_ParaSailsSetLogging(sai_precond, logging);
4588 }
4589 
4591 {
4592  HYPRE_ParaSailsSetSym(sai_precond, sym);
4593 }
4594 
4596 {
4597  HYPRE_ParaSailsDestroy(sai_precond);
4598 }
4599 
4600 
4602 {
4603  HYPRE_EuclidCreate(comm, &euc_precond);
4604  SetDefaultOptions();
4605 }
4606 
4608 {
4609  MPI_Comm comm;
4610 
4611  HYPRE_ParCSRMatrixGetComm(A, &comm);
4612 
4613  HYPRE_EuclidCreate(comm, &euc_precond);
4614  SetDefaultOptions();
4615 }
4616 
4617 void HypreEuclid::SetDefaultOptions()
4618 {
4619  int euc_level = 1; // We use ILU(1)
4620  int euc_stats = 0; // No logging
4621  int euc_mem = 0; // No memory logging
4622  int euc_bj = 0; // 1: Use Block Jacobi
4623  int euc_ro_sc = 0; // 1: Use Row scaling
4624 
4625  HYPRE_EuclidSetLevel(euc_precond, euc_level);
4626  HYPRE_EuclidSetStats(euc_precond, euc_stats);
4627  HYPRE_EuclidSetMem(euc_precond, euc_mem);
4628  HYPRE_EuclidSetBJ(euc_precond, euc_bj);
4629  HYPRE_EuclidSetRowScale(euc_precond, euc_ro_sc);
4630 }
4631 
4632 void HypreEuclid::SetLevel(int level)
4633 {
      // Set the fill level k used by the Euclid ILU(k) factorization.
4634  HYPRE_EuclidSetLevel(euc_precond, level);
4635 }
4636 
4637 void HypreEuclid::SetStats(int stats)
4638 {
      // Enable/disable Euclid's summary statistics logging.
4639  HYPRE_EuclidSetStats(euc_precond, stats);
4640 }
4641 
4643 {
4644  HYPRE_EuclidSetMem(euc_precond, mem);
4645 }
4646 
4648 {
4649  HYPRE_EuclidSetBJ(euc_precond, bj);
4650 }
4651 
4652 void HypreEuclid::SetRowScale(int row_scale)
4653 {
      // Enable/disable row scaling prior to factorization.
4654  HYPRE_EuclidSetRowScale(euc_precond, row_scale);
4655 }
4656 
4657 void HypreEuclid::ResetEuclidPrecond(MPI_Comm comm)
4658 {
      // Recreate the Euclid solver object on the given communicator.
4659  // Euclid does not seem to offer access to its current configuration, so we
4660  // simply reset it to its default options.
4661  HYPRE_EuclidDestroy(euc_precond);
4662  HYPRE_EuclidCreate(comm, &euc_precond);
4663 
4664  SetDefaultOptions();
4665 }
4666 
4668 {
4669  const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
4670  MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4671 
4672  if (A)
4673  {
4674  MPI_Comm comm;
4675  HYPRE_ParCSRMatrixGetComm(*new_A, &comm);
4676  ResetEuclidPrecond(comm);
4677  }
4678 
4679  // update base classes: Operator, Solver, HypreSolver
4680  height = new_A->Height();
4681  width = new_A->Width();
4682  A = const_cast<HypreParMatrix *>(new_A);
4683  setup_called = 0;
4684  delete X;
4685  delete B;
4686  B = X = NULL;
4687  auxB.Delete(); auxB.Reset();
4688  auxX.Delete(); auxX.Reset();
4689 }
4690 
4692 {
4693  HYPRE_EuclidDestroy(euc_precond);
4694 }
4695 
4696 
4697 #if MFEM_HYPRE_VERSION >= 21900
4699 {
4700  HYPRE_ILUCreate(&ilu_precond);
4701  SetDefaultOptions();
4702 }
4703 
4704 void HypreILU::SetDefaultOptions()
4705 {
4706  // The type of incomplete LU used locally and globally (see class doc)
4707  HYPRE_Int ilu_type = 0; // ILU(k) locally and block Jacobi globally
4708  HYPRE_ILUSetType(ilu_precond, ilu_type);
4709 
4710  // Maximum iterations; 1 iter for preconditioning
4711  HYPRE_Int max_iter = 1;
4712  HYPRE_ILUSetMaxIter(ilu_precond, max_iter);
4713 
4714  // The tolerance when used as a smoother; set to 0.0 for preconditioner
4715  HYPRE_Real tol = 0.0;
4716  HYPRE_ILUSetTol(ilu_precond, tol);
4717 
4718  // Fill level for ILU(k)
4719  HYPRE_Int lev_fill = 1;
4720  HYPRE_ILUSetLevelOfFill(ilu_precond, lev_fill);
4721 
4722  // Local reordering scheme; 0 = no reordering, 1 = reverse Cuthill-McKee
4723  HYPRE_Int reorder_type = 1;
4724  HYPRE_ILUSetLocalReordering(ilu_precond, reorder_type);
4725 
4726  // Information print level; 0 = none, 1 = setup, 2 = solve, 3 = setup+solve
4727  HYPRE_Int print_level = 0;
4728  HYPRE_ILUSetPrintLevel(ilu_precond, print_level);
4729 }
4730 
4731 void HypreILU::ResetILUPrecond()
4732 {
4733  if (ilu_precond)
4734  {
4735  HYPRE_ILUDestroy(ilu_precond);
4736  }
4737  HYPRE_ILUCreate(&ilu_precond);
4738  SetDefaultOptions();
4739 }
4740 
4741 void HypreILU::SetLevelOfFill(HYPRE_Int lev_fill)
4742 {
      // Set the fill level k for the local ILU(k) factorization.
4743  HYPRE_ILUSetLevelOfFill(ilu_precond, lev_fill);
4744 }
4745 
4746 void HypreILU::SetType(HYPRE_Int ilu_type)
4747 {
      // Select the hypre ILU variant (see HYPRE_ILUSetType for valid values).
4748  HYPRE_ILUSetType(ilu_precond, ilu_type);
4749 }
4750 
4751 void HypreILU::SetMaxIter(HYPRE_Int max_iter)
4752 {
      // Set the maximum number of ILU iterations (1 for preconditioning).
4753  HYPRE_ILUSetMaxIter(ilu_precond, max_iter);
4754 }
4755 
4756 void HypreILU::SetTol(HYPRE_Real tol)
4757 {
      // Set the convergence tolerance (0.0 when used as a preconditioner).
4758  HYPRE_ILUSetTol(ilu_precond, tol);
4759 }
4760 
4761 void HypreILU::SetLocalReordering(HYPRE_Int reorder_type)
4762 {
      // Set the local reordering scheme: 0 = none, 1 = reverse Cuthill-McKee.
4763  HYPRE_ILUSetLocalReordering(ilu_precond, reorder_type);
4764 }
4765 
4766 void HypreILU::SetPrintLevel(HYPRE_Int print_level)
4767 {
      // Set output verbosity: 0 = none, 1 = setup, 2 = solve, 3 = both.
4768  HYPRE_ILUSetPrintLevel(ilu_precond, print_level);
4769 }
4770 
4772 {
4773  const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
4774  MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4775 
4776  if (A) { ResetILUPrecond(); }
4777 
4778  // update base classes: Operator, Solver, HypreSolver
4779  height = new_A->Height();
4780  width = new_A->Width();
4781  A = const_cast<HypreParMatrix *>(new_A);
4782  setup_called = 0;
4783  delete X;
4784  delete B;
4785  B = X = NULL;
4786  auxB.Delete(); auxB.Reset();
4787  auxX.Delete(); auxX.Reset();
4788 }
4789 
4791 {
4792  HYPRE_ILUDestroy(ilu_precond);
4793 }
4794 #endif
4795 
4796 
4798 {
4799  HYPRE_BoomerAMGCreate(&amg_precond);
4800  SetDefaultOptions();
4801 }
4802 
4804 {
4805  HYPRE_BoomerAMGCreate(&amg_precond);
4806  SetDefaultOptions();
4807 }
4808 
4809 void HypreBoomerAMG::SetDefaultOptions()
4810 {
4811 #if !defined(HYPRE_USING_GPU)
4812  // AMG coarsening options:
4813  int coarsen_type = 10; // 10 = HMIS, 8 = PMIS, 6 = Falgout, 0 = CLJP
4814  int agg_levels = 1; // number of aggressive coarsening levels
4815  double theta = 0.25; // strength threshold: 0.25, 0.5, 0.8
4816 
4817  // AMG interpolation options:
4818  int interp_type = 6; // 6 = extended+i, 0 = classical
4819  int Pmax = 4; // max number of elements per row in P
4820 
4821  // AMG relaxation options:
4822  int relax_type = 8; // 8 = l1-GS, 6 = symm. GS, 3 = GS, 18 = l1-Jacobi
4823  int relax_sweeps = 1; // relaxation sweeps on each level
4824 
4825  // Additional options:
4826  int print_level = 1; // print AMG iterations? 1 = no, 2 = yes
4827  int max_levels = 25; // max number of levels in AMG hierarchy
4828 #else
4829  // AMG coarsening options:
4830  int coarsen_type = 8; // 10 = HMIS, 8 = PMIS, 6 = Falgout, 0 = CLJP
4831  int agg_levels = 0; // number of aggressive coarsening levels
4832  double theta = 0.25; // strength threshold: 0.25, 0.5, 0.8
4833 
4834  // AMG interpolation options:
4835  int interp_type = 6; // 6 = extended+i, or 18 = extended+e
4836  int Pmax = 4; // max number of elements per row in P
4837 
4838  // AMG relaxation options:
4839  int relax_type = 18; // 18 = l1-Jacobi, or 16 = Chebyshev
4840  int relax_sweeps = 1; // relaxation sweeps on each level
4841 
4842  // Additional options:
4843  int print_level = 1; // print AMG iterations? 1 = no, 2 = yes
4844  int max_levels = 25; // max number of levels in AMG hierarchy
4845 #endif
4846 
4847  HYPRE_BoomerAMGSetCoarsenType(amg_precond, coarsen_type);
4848  HYPRE_BoomerAMGSetAggNumLevels(amg_precond, agg_levels);
4849  HYPRE_BoomerAMGSetRelaxType(amg_precond, relax_type);
4850  // default in hypre is 1.0 with some exceptions, e.g. for relax_type = 7
4851  // HYPRE_BoomerAMGSetRelaxWt(amg_precond, 1.0);
4852  HYPRE_BoomerAMGSetNumSweeps(amg_precond, relax_sweeps);
4853  HYPRE_BoomerAMGSetStrongThreshold(amg_precond, theta);
4854  HYPRE_BoomerAMGSetInterpType(amg_precond, interp_type);
4855  HYPRE_BoomerAMGSetPMaxElmts(amg_precond, Pmax);
4856  HYPRE_BoomerAMGSetPrintLevel(amg_precond, print_level);
4857  HYPRE_BoomerAMGSetMaxLevels(amg_precond, max_levels);
4858 
4859  // Use as a preconditioner (one V-cycle, zero tolerance)
4860  HYPRE_BoomerAMGSetMaxIter(amg_precond, 1);
4861  HYPRE_BoomerAMGSetTol(amg_precond, 0.0);
4862 }
4863 
4864 void HypreBoomerAMG::ResetAMGPrecond()
4865 {
      // Recreate the BoomerAMG object while preserving its current settings.
      // Options with no public getter are read through hypre's internal
      // hypre_ParAMGData accessors; the elasticity-specific options are only
      // saved/restored when rigid-body modes (rbms) are present.
4866  HYPRE_Int coarsen_type;
4867  HYPRE_Int agg_levels;
4868  HYPRE_Int relax_type;
4869  HYPRE_Int relax_sweeps;
4870  HYPRE_Real theta;
4871  HYPRE_Int interp_type;
4872  HYPRE_Int Pmax;
4873  HYPRE_Int print_level;
4874  HYPRE_Int max_levels;
4875  HYPRE_Int dim;
4876  HYPRE_Int nrbms = rbms.Size();
4877  HYPRE_Int nodal;
4878  HYPRE_Int nodal_diag;
4879  HYPRE_Int relax_coarse;
4880  HYPRE_Int interp_vec_variant;
4881  HYPRE_Int q_max;
4882  HYPRE_Int smooth_interp_vectors;
4883  HYPRE_Int interp_refine;
4884 
4885  hypre_ParAMGData *amg_data = (hypre_ParAMGData *)amg_precond;
4886 
4887  // read options from amg_precond
4888  HYPRE_BoomerAMGGetCoarsenType(amg_precond, &coarsen_type);
4889  agg_levels = hypre_ParAMGDataAggNumLevels(amg_data);
4890  relax_type = hypre_ParAMGDataUserRelaxType(amg_data);
4891  relax_sweeps = hypre_ParAMGDataUserNumSweeps(amg_data);
4892  HYPRE_BoomerAMGGetStrongThreshold(amg_precond, &theta);
4893  hypre_BoomerAMGGetInterpType(amg_precond, &interp_type);
4894  HYPRE_BoomerAMGGetPMaxElmts(amg_precond, &Pmax);
4895  HYPRE_BoomerAMGGetPrintLevel(amg_precond, &print_level);
4896  HYPRE_BoomerAMGGetMaxLevels(amg_precond, &max_levels);
4897  HYPRE_BoomerAMGGetNumFunctions(amg_precond, &dim);
4898  if (nrbms) // elasticity solver options
4899  {
4900  nodal = hypre_ParAMGDataNodal(amg_data);
4901  nodal_diag = hypre_ParAMGDataNodalDiag(amg_data);
4902  HYPRE_BoomerAMGGetCycleRelaxType(amg_precond, &relax_coarse, 3);
4903  interp_vec_variant = hypre_ParAMGInterpVecVariant(amg_data);
4904  q_max = hypre_ParAMGInterpVecQMax(amg_data);
4905  smooth_interp_vectors = hypre_ParAMGSmoothInterpVectors(amg_data);
4906  interp_refine = hypre_ParAMGInterpRefine(amg_data);
4907  }
4908 
      // Replace the solver object and re-apply the saved configuration.
4909  HYPRE_BoomerAMGDestroy(amg_precond);
4910  HYPRE_BoomerAMGCreate(&amg_precond);
4911 
4912  HYPRE_BoomerAMGSetCoarsenType(amg_precond, coarsen_type);
4913  HYPRE_BoomerAMGSetAggNumLevels(amg_precond, agg_levels);
4914  HYPRE_BoomerAMGSetRelaxType(amg_precond, relax_type);
4915  HYPRE_BoomerAMGSetNumSweeps(amg_precond, relax_sweeps);
4916  HYPRE_BoomerAMGSetMaxLevels(amg_precond, max_levels);
4917  HYPRE_BoomerAMGSetTol(amg_precond, 0.0);
4918  HYPRE_BoomerAMGSetMaxIter(amg_precond, 1); // one V-cycle
4919  HYPRE_BoomerAMGSetStrongThreshold(amg_precond, theta);
4920  HYPRE_BoomerAMGSetInterpType(amg_precond, interp_type);
4921  HYPRE_BoomerAMGSetPMaxElmts(amg_precond, Pmax);
4922  HYPRE_BoomerAMGSetPrintLevel(amg_precond, print_level);
4923  HYPRE_BoomerAMGSetNumFunctions(amg_precond, dim);
4924  if (nrbms)
4925  {
4926  HYPRE_BoomerAMGSetNodal(amg_precond, nodal);
4927  HYPRE_BoomerAMGSetNodalDiag(amg_precond, nodal_diag);
4928  HYPRE_BoomerAMGSetCycleRelaxType(amg_precond, relax_coarse, 3);
4929  HYPRE_BoomerAMGSetInterpVecVariant(amg_precond, interp_vec_variant);
4930  HYPRE_BoomerAMGSetInterpVecQMax(amg_precond, q_max);
4931  HYPRE_BoomerAMGSetSmoothInterpVectors(amg_precond, smooth_interp_vectors);
4932  HYPRE_BoomerAMGSetInterpRefine(amg_precond, interp_refine);
      // The rigid-body modes were destroyed with the old object's vectors;
      // rebuild them and register them with the new solver.
4933  RecomputeRBMs();
4934  HYPRE_BoomerAMGSetInterpVectors(amg_precond, rbms.Size(), rbms.GetData());
4935  }
4936 }
4937 
4939 {
4940  const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
4941  MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4942 
4943  if (A) { ResetAMGPrecond(); }
4944 
4945  // update base classes: Operator, Solver, HypreSolver
4946  height = new_A->Height();
4947  width = new_A->Width();
4948  A = const_cast<HypreParMatrix *>(new_A);
4949  setup_called = 0;
4950  delete X;
4951  delete B;
4952  B = X = NULL;
4953  auxB.Delete(); auxB.Reset();
4954  auxX.Delete(); auxX.Reset();
4955 }
4956 
4957 void HypreBoomerAMG::SetSystemsOptions(int dim, bool order_bynodes)
4958 {
      // Configure BoomerAMG for a system of 'dim' coupled scalar unknowns.
      // When the unknowns are ordered by nodes, an explicit DofFunc mapping
      // (component index of each dof) must be handed to hypre.
4959  HYPRE_BoomerAMGSetNumFunctions(amg_precond, dim);
4960 
4961  // The default "system" ordering in hypre is Ordering::byVDIM. When we are
4962  // using Ordering::byNODES, we have to specify the ordering explicitly with
4963  // HYPRE_BoomerAMGSetDofFunc as in the following code.
4964  if (order_bynodes)
4965  {
4966  // Generate DofFunc mapping on the host
4967  HYPRE_Int *h_mapping = mfem_hypre_CTAlloc_host(HYPRE_Int, height);
4968  int h_nnodes = height / dim; // nodes owned in linear algebra (not fem)
4969  MFEM_VERIFY(height % dim == 0, "Ordering does not work as claimed!");
4970  int k = 0;
      // byNODES layout: all dofs of component 0 first, then component 1, etc.
4971  for (int i = 0; i < dim; ++i)
4972  {
4973  for (int j = 0; j < h_nnodes; ++j)
4974  {
4975  h_mapping[k++] = i;
4976  }
4977  }
4978 
4979  // After the addition of hypre_IntArray, mapping is assumed
4980  // to be a device pointer. Previously, it was assumed to be
4981  // a host pointer.
4982 #if defined(hypre_IntArrayData) && defined(HYPRE_USING_GPU)
      // Copy the host-built mapping to device memory and free the host copy.
4983  HYPRE_Int *mapping = mfem_hypre_CTAlloc(HYPRE_Int, height);
4984  hypre_TMemcpy(mapping, h_mapping, HYPRE_Int, height,
4985  HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
4986  mfem_hypre_TFree_host(h_mapping);
4987 #else
4988  HYPRE_Int *mapping = h_mapping;
4989 #endif
4990 
4991  // hypre actually deletes the mapping pointer in HYPRE_BoomerAMGDestroy,
4992  // so we don't need to track it
4993  HYPRE_BoomerAMGSetDofFunc(amg_precond, mapping);
4994  }
4995 
4996  // More robust options with respect to convergence
4997  HYPRE_BoomerAMGSetAggNumLevels(amg_precond, 0);
4998  HYPRE_BoomerAMGSetStrongThreshold(amg_precond, 0.5);
4999 }
5000 
5001 // Rotational rigid-body mode functions, used in SetElasticityOptions()
5002 static void func_rxy(const Vector &x, Vector &y)
5003 {
5004  y = 0.0; y(0) = x(1); y(1) = -x(0);
5005 }
5006 static void func_ryz(const Vector &x, Vector &y)
5007 {
5008  y = 0.0; y(1) = x(2); y(2) = -x(1);
5009 }
5010 static void func_rzx(const Vector &x, Vector &y)
5011 {
5012  y = 0.0; y(2) = x(0); y(0) = -x(2);
5013 }
5014 
5015 void HypreBoomerAMG::RecomputeRBMs()
5016 {
      // Rebuild the rotational rigid-body mode vectors (rbms) on the current
      // fespace: 1 rotation in 2D, 3 rotations in 3D, none otherwise. Any
      // previously stored HYPRE_ParVector objects are destroyed first.
5017  int nrbms;
5018  Array<HypreParVector*> gf_rbms;
5019  int dim = fespace->GetParMesh()->Dimension();
5020 
5021  for (int i = 0; i < rbms.Size(); i++)
5022  {
5023  HYPRE_ParVectorDestroy(rbms[i]);
5024  }
5025 
5026  if (dim == 2)
5027  {
5028  nrbms = 1;
5029 
5030  VectorFunctionCoefficient coeff_rxy(2, func_rxy);
5031 
      // Project the rotation field onto the FE space, then extract true dofs.
5032  ParGridFunction rbms_rxy(fespace);
5033  rbms_rxy.ProjectCoefficient(coeff_rxy);
5034 
5035  rbms.SetSize(nrbms);
5036  gf_rbms.SetSize(nrbms);
5037  gf_rbms[0] = fespace->NewTrueDofVector();
5038  rbms_rxy.GetTrueDofs(*gf_rbms[0]);
5039  }
5040  else if (dim == 3)
5041  {
5042  nrbms = 3;
5043 
5044  VectorFunctionCoefficient coeff_rxy(3, func_rxy);
5045  VectorFunctionCoefficient coeff_ryz(3, func_ryz);
5046  VectorFunctionCoefficient coeff_rzx(3, func_rzx);
5047 
5048  ParGridFunction rbms_rxy(fespace);
5049  ParGridFunction rbms_ryz(fespace);
5050  ParGridFunction rbms_rzx(fespace);
5051  rbms_rxy.ProjectCoefficient(coeff_rxy);
5052  rbms_ryz.ProjectCoefficient(coeff_ryz);
5053  rbms_rzx.ProjectCoefficient(coeff_rzx);
5054 
5055  rbms.SetSize(nrbms);
5056  gf_rbms.SetSize(nrbms);
5057  gf_rbms[0] = fespace->NewTrueDofVector();
5058  gf_rbms[1] = fespace->NewTrueDofVector();
5059  gf_rbms[2] = fespace->NewTrueDofVector();
5060  rbms_rxy.GetTrueDofs(*gf_rbms[0]);
5061  rbms_ryz.GetTrueDofs(*gf_rbms[1]);
5062  rbms_rzx.GetTrueDofs(*gf_rbms[2]);
5063  }
5064  else
5065  {
      // 1D (or other) meshes have no rotational modes.
5066  nrbms = 0;
5067  rbms.SetSize(nrbms);
5068  }
5069 
5070  // Transfer the RBMs from the ParGridFunction to the HYPRE_ParVector objects
      // StealParVector() transfers ownership of the underlying hypre vector,
      // so deleting the HypreParVector wrapper afterwards is safe.
5071  for (int i = 0; i < nrbms; i++)
5072  {
5073  rbms[i] = gf_rbms[i]->StealParVector();
5074  delete gf_rbms[i];
5075  }
5076 }
5077 
5079 {
5080 #ifdef HYPRE_USING_GPU
5081  MFEM_ABORT("this method is not supported in hypre built with GPU support");
5082 #endif
5083 
5084  // Save the finite element space to support multiple calls to SetOperator()
5085  this->fespace = fespace_;
5086 
5087  // Make sure the systems AMG options are set
5088  int dim = fespace_->GetParMesh()->Dimension();
5090 
5091  // Nodal coarsening options (nodal coarsening is required for this solver)
5092  // See hypre's new_ij driver and the paper for descriptions.
5093  int nodal = 4; // strength reduction norm: 1, 3 or 4
5094  int nodal_diag = 1; // diagonal in strength matrix: 0, 1 or 2
5095  int relax_coarse = 8; // smoother on the coarsest grid: 8, 99 or 29
5096 
5097  // Elasticity interpolation options
5098  int interp_vec_variant = 2; // 1 = GM-1, 2 = GM-2, 3 = LN
5099  int q_max = 4; // max elements per row for each Q
5100  int smooth_interp_vectors = 1; // smooth the rigid-body modes?
5101 
5102  // Optionally pre-process the interpolation matrix through iterative weight
5103  // refinement (this is generally applicable for any system)
5104  int interp_refine = 1;
5105 
5106  HYPRE_BoomerAMGSetNodal(amg_precond, nodal);
5107  HYPRE_BoomerAMGSetNodalDiag(amg_precond, nodal_diag);
5108  HYPRE_BoomerAMGSetCycleRelaxType(amg_precond, relax_coarse, 3);
5109  HYPRE_BoomerAMGSetInterpVecVariant(amg_precond, interp_vec_variant);
5110  HYPRE_BoomerAMGSetInterpVecQMax(amg_precond, q_max);
5111  HYPRE_BoomerAMGSetSmoothInterpVectors(amg_precond, smooth_interp_vectors);
5112  HYPRE_BoomerAMGSetInterpRefine(amg_precond, interp_refine);
5113 
5114  RecomputeRBMs();
5115  HYPRE_BoomerAMGSetInterpVectors(amg_precond, rbms.Size(), rbms.GetData());
5116 
5117  // The above BoomerAMG options may result in singular matrices on the coarse
5118  // grids, which are handled correctly in hypre's Solve method, but can produce
5119  // hypre errors in the Setup (specifically in the l1 row norm computation).
5120  // See the documentation of SetErrorMode() for more details.
5122 }
5123 
5124 #if MFEM_HYPRE_VERSION >= 21800
5125 
5127  const std::string &prerelax,
5128  const std::string &postrelax)
5129 {
5130  // Hypre parameters
5131  int Sabs = 0;
5132  int interp_type = 100;
5133  int relax_type = 10;
5134  int coarsen_type = 6;
5135  double strength_tolC = 0.1;
5136  double strength_tolR = 0.01;
5137  double filter_tolR = 0.0;
5138  double filterA_tol = 0.0;
5139 
5140  // Set relaxation on specified grid points
5141  int ns_down = 0, ns_up = 0, ns_coarse; // init to suppress gcc warnings
5142  if (distanceR > 0)
5143  {
5144  ns_down = prerelax.length();
5145  ns_up = postrelax.length();
5146  ns_coarse = 1;
5147 
5148  // Array to store relaxation scheme and pass to Hypre
5149  HYPRE_Int **grid_relax_points = mfem_hypre_TAlloc(HYPRE_Int*, 4);
5150  grid_relax_points[0] = NULL;
5151  grid_relax_points[1] = mfem_hypre_TAlloc(HYPRE_Int, ns_down);
5152  grid_relax_points[2] = mfem_hypre_TAlloc(HYPRE_Int, ns_up);
5153  grid_relax_points[3] = mfem_hypre_TAlloc(HYPRE_Int, 1);
5154  grid_relax_points[3][0] = 0;
5155 
5156  // set down relax scheme
5157  for (int i = 0; i<ns_down; i++)
5158  {
5159  if (prerelax[i] == 'F')
5160  {
5161  grid_relax_points[1][i] = -1;
5162  }
5163  else if (prerelax[i] == 'C')
5164  {
5165  grid_relax_points[1][i] = 1;
5166  }
5167  else if (prerelax[i] == 'A')
5168  {
5169  grid_relax_points[1][i] = 0;
5170  }
5171  }
5172 
5173  // set up relax scheme
5174  for (int i = 0; i<ns_up; i++)
5175  {
5176  if (postrelax[i] == 'F')
5177  {
5178  grid_relax_points[2][i] = -1;
5179  }
5180  else if (postrelax[i] == 'C')
5181  {
5182  grid_relax_points[2][i] = 1;
5183  }
5184  else if (postrelax[i] == 'A')
5185  {
5186  grid_relax_points[2][i] = 0;
5187  }
5188  }
5189 
5190  HYPRE_BoomerAMGSetRestriction(amg_precond, distanceR);
5191 
5192  HYPRE_BoomerAMGSetGridRelaxPoints(amg_precond, grid_relax_points);
5193 
5194  HYPRE_BoomerAMGSetInterpType(amg_precond, interp_type);
5195  }
5196 
5197  if (Sabs)
5198  {
5199  HYPRE_BoomerAMGSetSabs(amg_precond, Sabs);
5200  }
5201 
5202  HYPRE_BoomerAMGSetCoarsenType(amg_precond, coarsen_type);
5203 
5204  // does not support aggressive coarsening
5205  HYPRE_BoomerAMGSetAggNumLevels(amg_precond, 0);
5206 
5207  HYPRE_BoomerAMGSetStrongThreshold(amg_precond, strength_tolC);
5208 
5209  if (distanceR > 0)
5210  {
5211  HYPRE_BoomerAMGSetStrongThresholdR(amg_precond, strength_tolR);
5212  HYPRE_BoomerAMGSetFilterThresholdR(amg_precond, filter_tolR);
5213  }
5214 
5215  if (relax_type > -1)
5216  {
5217  HYPRE_BoomerAMGSetRelaxType(amg_precond, relax_type);
5218  }
5219 
5220  if (distanceR > 0)
5221  {
5222  HYPRE_BoomerAMGSetCycleNumSweeps(amg_precond, ns_coarse, 3);
5223  HYPRE_BoomerAMGSetCycleNumSweeps(amg_precond, ns_down, 1);
5224  HYPRE_BoomerAMGSetCycleNumSweeps(amg_precond, ns_up, 2);
5225 
5226  HYPRE_BoomerAMGSetADropTol(amg_precond, filterA_tol);
5227  // type = -1: drop based on row inf-norm
5228  HYPRE_BoomerAMGSetADropType(amg_precond, -1);
5229  }
5230 }
5231 
5232 #endif
5233 
5235 {
5236  for (int i = 0; i < rbms.Size(); i++)
5237  {
5238  HYPRE_ParVectorDestroy(rbms[i]);
5239  }
5240 
5241  HYPRE_BoomerAMGDestroy(amg_precond);
5242 }
5243 
5245 {
5246  Init(edge_fespace);
5247 }
5248 
5250  : HypreSolver(&A)
5251 {
5252  Init(edge_fespace);
5253 }
5254 
5257  : HypreSolver(&A),
5258  x(x_),
5259  y(y_),
5260  z(z_),
5261  G(G_),
5262  Pi(NULL),
5263  Pix(NULL),
5264  Piy(NULL),
5265  Piz(NULL)
5266 {
5267  MFEM_ASSERT(G != NULL, "");
5268  MFEM_ASSERT(x != NULL, "");
5269  MFEM_ASSERT(y != NULL, "");
5270  int sdim = (z == NULL) ? 2 : 3;
5271  int cycle_type = 13;
5272 
5273  MakeSolver(sdim, cycle_type);
5274 
5275  HYPRE_ParVector pz = z ? static_cast<HYPRE_ParVector>(*z) : NULL;
5276  HYPRE_AMSSetCoordinateVectors(ams, *x, *y, pz);
5277  HYPRE_AMSSetDiscreteGradient(ams, *G);
5278 }
5279 
5280 void HypreAMS::MakeSolver(int sdim, int cycle_type)
5281 {
      // Create and configure the hypre AMS object as a one-iteration
      // preconditioner. CPU and GPU builds use different smoother and
      // internal BoomerAMG settings.
5282  int rlx_sweeps = 1;
5283  double rlx_weight = 1.0;
5284  double rlx_omega = 1.0;
5285 #if !defined(HYPRE_USING_GPU)
5286  int amg_coarsen_type = 10;
5287  int amg_agg_levels = 1;
5288  int amg_rlx_type = 8;
5289  int rlx_type = 2;
5290  double theta = 0.25;
5291  int amg_interp_type = 6;
5292  int amg_Pmax = 4;
5293 #else
5294  int amg_coarsen_type = 8;
5295  int amg_agg_levels = 0;
5296  int amg_rlx_type = 18;
5297  int rlx_type = 1;
5298  double theta = 0.25;
5299  int amg_interp_type = 6;
5300  int amg_Pmax = 4;
5301 #endif
5302 
      // Remember the dimension/cycle type for later reconfiguration.
5303  space_dim = sdim;
5304  ams_cycle_type = cycle_type;
5305  HYPRE_AMSCreate(&ams);
5306 
5307  HYPRE_AMSSetDimension(ams, sdim); // 2D H(div) and 3D H(curl) problems
5308  HYPRE_AMSSetTol(ams, 0.0);
5309  HYPRE_AMSSetMaxIter(ams, 1); // use as a preconditioner
5310  HYPRE_AMSSetCycleType(ams, cycle_type);
5311  HYPRE_AMSSetPrintLevel(ams, 1);
5312 
5313  // set additional AMS options
5314  HYPRE_AMSSetSmoothingOptions(ams, rlx_type, rlx_sweeps, rlx_weight, rlx_omega);
5315  HYPRE_AMSSetAlphaAMGOptions(ams, amg_coarsen_type, amg_agg_levels, amg_rlx_type,
5316  theta, amg_interp_type, amg_Pmax);
5317  HYPRE_AMSSetBetaAMGOptions(ams, amg_coarsen_type, amg_agg_levels, amg_rlx_type,
5318  theta, amg_interp_type, amg_Pmax);
5319 
5320  HYPRE_AMSSetAlphaAMGCoarseRelaxType(ams, amg_rlx_type);
5321  HYPRE_AMSSetBetaAMGCoarseRelaxType(ams, amg_rlx_type);
5322 
5323  // The AMS preconditioner may sometimes require inverting singular matrices
5324  // with BoomerAMG, which are handled correctly in hypre's Solve method, but
5325  // can produce hypre errors in the Setup (specifically in the l1 row norm
5326  // computation). See the documentation of SetErrorMode() for more details.
      // NOTE(review): the SetErrorMode(...) call referenced by the comment
      // above is not visible in this listing — confirm against the full
      // source file.
5328 }
5329 
5330 void HypreAMS::MakeGradientAndInterpolation(
5331  ParFiniteElementSpace *edge_fespace, int cycle_type)
5332 {
      // Build the auxiliary objects AMS needs: vertex coordinate vectors (for
      // lowest-order meshes without a Nodes grid function), the discrete
      // gradient matrix G, and the Nedelec interpolation matrices Pi/Pix/...
5333  int dim = edge_fespace->GetMesh()->Dimension();
5334  int sdim = edge_fespace->GetMesh()->SpaceDimension();
5335  const FiniteElementCollection *edge_fec = edge_fespace->FEColl();
5336 
      // Detect trace spaces; an RT trace space is replaced below by an
      // equivalent ND trace space for the gradient/interpolation setup.
5337  bool trace_space, rt_trace_space;
5338  ND_Trace_FECollection *nd_tr_fec = NULL;
5339  trace_space = dynamic_cast<const ND_Trace_FECollection*>(edge_fec);
5340  rt_trace_space = dynamic_cast<const RT_Trace_FECollection*>(edge_fec);
5341  trace_space = trace_space || rt_trace_space;
5342 
5343  MFEM_VERIFY(!edge_fespace->IsVariableOrder(), "");
5344  int p = edge_fec->GetOrder();
5345 
5346  ParMesh *pmesh = edge_fespace->GetParMesh();
5347  if (rt_trace_space)
5348  {
      // Note: edge_fespace is re-pointed to a locally owned space here; the
      // replacement (and nd_tr_fec) is deleted at the end of this method.
5349  nd_tr_fec = new ND_Trace_FECollection(p, dim);
5350  edge_fespace = new ParFiniteElementSpace(pmesh, nd_tr_fec);
5351  }
5352 
5353  // define the nodal linear finite element space associated with edge_fespace
5354  FiniteElementCollection *vert_fec;
5355  if (trace_space)
5356  {
5357  vert_fec = new H1_Trace_FECollection(p, dim);
5358  }
5359  else
5360  {
5361  vert_fec = new H1_FECollection(p, dim);
5362  }
5363  ParFiniteElementSpace *vert_fespace = new ParFiniteElementSpace(pmesh,
5364  vert_fec);
5365 
5366  // generate and set the vertex coordinates
      // Only done for order-1 spaces on meshes without a Nodes function; in
      // all other cases the Pi interpolation matrices are used instead.
5367  if (p == 1 && pmesh->GetNodes() == NULL)
5368  {
5369  ParGridFunction x_coord(vert_fespace);
5370  ParGridFunction y_coord(vert_fespace);
5371  ParGridFunction z_coord(vert_fespace);
5372  double *coord;
5373  for (int i = 0; i < pmesh->GetNV(); i++)
5374  {
5375  coord = pmesh -> GetVertex(i);
5376  x_coord(i) = coord[0];
5377  if (sdim >= 2) { y_coord(i) = coord[1]; }
5378  if (sdim == 3) { z_coord(i) = coord[2]; }
5379  }
5380  x = x_coord.ParallelProject();
5381  y = NULL;
5382  z = NULL;
5383  x->HypreReadWrite();
5384 
5385  if (sdim >= 2)
5386  {
5387  y = y_coord.ParallelProject();
5388  y->HypreReadWrite();
5389  }
5390  if (sdim == 3)
5391  {
5392  z = z_coord.ParallelProject();
5393  z->HypreReadWrite();
5394  }
5395 
5396  HYPRE_AMSSetCoordinateVectors(ams,
5397  x ? (HYPRE_ParVector)(*x) : NULL,
5398  y ? (HYPRE_ParVector)(*y) : NULL,
5399  z ? (HYPRE_ParVector)(*z) : NULL);
5400  }
5401  else
5402  {
5403  x = NULL;
5404  y = NULL;
5405  z = NULL;
5406  }
5407 
5408  // generate and set the discrete gradient
5409  ParDiscreteLinearOperator *grad;
5410  grad = new ParDiscreteLinearOperator(vert_fespace, edge_fespace);
5411  if (trace_space)
5412  {
5413  grad->AddTraceFaceInterpolator(new GradientInterpolator);
5414  }
5415  else
5416  {
5417  grad->AddDomainInterpolator(new GradientInterpolator);
5418  }
5419  grad->Assemble();
5420  grad->Finalize();
      // G is a member; it stays alive for the lifetime of this HypreAMS.
5421  G = grad->ParallelAssemble();
5422  HYPRE_AMSSetDiscreteGradient(ams, *G);
5423  delete grad;
5424 
5425  // generate and set the Nedelec interpolation matrices
5426  Pi = Pix = Piy = Piz = NULL;
5427  if (p > 1 || pmesh->GetNodes() != NULL)
5428  {
      // Vector-valued H1 space (one copy per space dimension, byVDIM).
5429  ParFiniteElementSpace *vert_fespace_d
5430  = new ParFiniteElementSpace(pmesh, vert_fec, sdim, Ordering::byVDIM);
5431 
5432  ParDiscreteLinearOperator *id_ND;
5433  id_ND = new ParDiscreteLinearOperator(vert_fespace_d, edge_fespace);
5434  if (trace_space)
5435  {
5436  id_ND->AddTraceFaceInterpolator(new IdentityInterpolator);
5437  }
5438  else
5439  {
5440  id_ND->AddDomainInterpolator(new IdentityInterpolator);
5441  }
5442  id_ND->Assemble();
5443  id_ND->Finalize();
5444 
      // Cycle types < 10 use the monolithic Pi; types >= 10 use the
      // per-component blocks Pix/Piy/Piz.
5445  if (cycle_type < 10)
5446  {
5447  Pi = id_ND->ParallelAssemble();
5448  }
5449  else
5450  {
5451  Array2D<HypreParMatrix *> Pi_blocks;
5452  id_ND->GetParBlocks(Pi_blocks);
5453  Pix = Pi_blocks(0,0);
5454  if (sdim >= 2) { Piy = Pi_blocks(0,1); }
5455  if (sdim == 3) { Piz = Pi_blocks(0,2); }
5456  }
5457 
5458  delete id_ND;
5459 
5460  HYPRE_ParCSRMatrix HY_Pi = (Pi) ? (HYPRE_ParCSRMatrix) *Pi : NULL;
5461  HYPRE_ParCSRMatrix HY_Pix = (Pix) ? (HYPRE_ParCSRMatrix) *Pix : NULL;
5462  HYPRE_ParCSRMatrix HY_Piy = (Piy) ? (HYPRE_ParCSRMatrix) *Piy : NULL;
5463  HYPRE_ParCSRMatrix HY_Piz = (Piz) ? (HYPRE_ParCSRMatrix) *Piz : NULL;
5464  HYPRE_AMSSetInterpolations(ams, HY_Pi, HY_Pix, HY_Piy, HY_Piz);
5465 
5466  delete vert_fespace_d;
5467  }
5468 
5469  delete vert_fespace;
5470  delete vert_fec;
5471 
5472  if (rt_trace_space)
5473  {
      // Delete the locally created replacement space (see above).
5474  delete edge_fespace;
5475  delete nd_tr_fec;
5476  }
5477 }
5478 
5479 void HypreAMS::Init(ParFiniteElementSpace *edge_fespace)
5480 {
5481  int cycle_type = 13;
5482  int sdim = edge_fespace->GetMesh()->SpaceDimension();
5483  MakeSolver(sdim, cycle_type);
5484  MakeGradientAndInterpolation(edge_fespace, cycle_type);
5485 }
5486 
5487 void HypreAMS::ResetAMSPrecond()
5488 {
5489 #if MFEM_HYPRE_VERSION >= 22600
5490  /* Read options from ams */
5491  auto *ams_data = (hypre_AMSData *)ams;
5492 
5493  /* Space dimension */
5494  HYPRE_Int dim = hypre_AMSDataDimension(ams_data);
5495 
5496  /* Vertex space data */
5497  hypre_ParCSRMatrix *hy_G = hypre_AMSDataDiscreteGradient(ams_data);
5498 
5499  HYPRE_Int beta_is_zero = hypre_AMSDataBetaIsZero(ams_data);
5500 
5501  /* Vector vertex space data */
5502  hypre_ParCSRMatrix *hy_Pi hypre_AMSDataPiInterpolation(ams_data);
5503  hypre_ParCSRMatrix *hy_Pix = ams_data->Pix;
5504  hypre_ParCSRMatrix *hy_Piy = ams_data->Piy;
5505  hypre_ParCSRMatrix *hy_Piz = ams_data->Piz;
5506  HYPRE_Int owns_Pi = hypre_AMSDataOwnsPiInterpolation(ams_data);
5507  if (owns_Pi)
5508  {
5509  ams_data->owns_Pi = 0; // we're stealing Pi
5510  }
5511 
5512  /* Coordinates of the vertices */
5513  hypre_ParVector *hy_x = hypre_AMSDataVertexCoordinateX(ams_data);
5514  hypre_ParVector *hy_y = hypre_AMSDataVertexCoordinateY(ams_data);
5515  hypre_ParVector *hy_z = hypre_AMSDataVertexCoordinateZ(ams_data);
5516 
5517  /* Solver options */
5518  HYPRE_Int maxit = hypre_AMSDataMaxIter(ams_data);
5519  HYPRE_Real tol = hypre_AMSDataTol(ams_data);
5520  HYPRE_Int cycle_type = hypre_AMSDataCycleType(ams_data);
5521  HYPRE_Int ams_print_level = hypre_AMSDataPrintLevel(ams_data);
5522 
5523  /* Smoothing and AMG options */
5524  HYPRE_Int A_relax_type = hypre_AMSDataARelaxType(ams_data);
5525  HYPRE_Int A_relax_times = hypre_AMSDataARelaxTimes(ams_data);
5526  HYPRE_Real A_relax_weight = hypre_AMSDataARelaxWeight(ams_data);
5527  HYPRE_Real A_omega = hypre_AMSDataAOmega(ams_data);
5528  HYPRE_Int A_cheby_order = hypre_AMSDataAChebyOrder(ams_data);
5529  HYPRE_Real A_cheby_fraction = hypre_AMSDataAChebyFraction(ams_data);
5530 
5531  HYPRE_Int B_Pi_coarsen_type = hypre_AMSDataPoissonAlphaAMGCoarsenType(ams_data);
5532  HYPRE_Int B_Pi_agg_levels = hypre_AMSDataPoissonAlphaAMGAggLevels(ams_data);
5533  HYPRE_Int B_Pi_relax_type = hypre_AMSDataPoissonAlphaAMGRelaxType(ams_data);
5534  HYPRE_Int B_Pi_coarse_relax_type = ams_data->B_Pi_coarse_relax_type;
5535  HYPRE_Real B_Pi_theta = hypre_AMSDataPoissonAlphaAMGStrengthThreshold(ams_data);
5536  HYPRE_Int B_Pi_interp_type = ams_data->B_Pi_interp_type;
5537  HYPRE_Int B_Pi_Pmax = ams_data->B_Pi_Pmax;
5538 
5539  HYPRE_Int B_G_coarsen_type = hypre_AMSDataPoissonBetaAMGCoarsenType(ams_data);
5540  HYPRE_Int B_G_agg_levels = hypre_AMSDataPoissonBetaAMGAggLevels(ams_data);
5541  HYPRE_Int B_G_relax_type = hypre_AMSDataPoissonBetaAMGRelaxType(ams_data);
5542  HYPRE_Int B_G_coarse_relax_type = ams_data->B_G_coarse_relax_type;
5543  HYPRE_Real B_G_theta = hypre_AMSDataPoissonBetaAMGStrengthThreshold(ams_data);
5544  HYPRE_Int B_G_interp_type = ams_data->B_G_interp_type;
5545  HYPRE_Int B_G_Pmax = ams_data->B_G_Pmax;
5546 
5547  HYPRE_AMSDestroy(ams);
5548  HYPRE_AMSCreate(&ams);
5549  ams_data = (hypre_AMSData *)ams;
5550 
5551  HYPRE_AMSSetDimension(ams, dim); // 2D H(div) and 3D H(curl) problems
5552  HYPRE_AMSSetTol(ams, tol);
5553  HYPRE_AMSSetMaxIter(ams, maxit); // use as a preconditioner
5554  HYPRE_AMSSetCycleType(ams, cycle_type);
5555  HYPRE_AMSSetPrintLevel(ams, ams_print_level);
5556 
5557  HYPRE_AMSSetCoordinateVectors(ams, hy_x, hy_y, hy_z);
5558 
5559  HYPRE_AMSSetDiscreteGradient(ams, hy_G);
5560  HYPRE_AMSSetCoordinateVectors(ams, hy_x, hy_y, hy_z);
5561  HYPRE_AMSSetInterpolations(ams, hy_Pi, hy_Pix, hy_Piy, hy_Piz);
5562  ams_data->owns_Pi = owns_Pi;
5563 
5564  // set additional AMS options
5565  HYPRE_AMSSetSmoothingOptions(ams, A_relax_type, A_relax_times, A_relax_weight,
5566  A_omega);
5567 
5568  hypre_AMSDataAChebyOrder(ams_data) = A_cheby_order;
5569  hypre_AMSDataAChebyFraction(ams_data) = A_cheby_fraction;
5570 
5571  HYPRE_AMSSetAlphaAMGOptions(ams, B_Pi_coarsen_type, B_Pi_agg_levels,
5572  B_Pi_relax_type,
5573  B_Pi_theta, B_Pi_interp_type, B_Pi_Pmax);
5574  HYPRE_AMSSetBetaAMGOptions(ams, B_G_coarsen_type, B_G_agg_levels,
5575  B_G_relax_type,
5576  B_G_theta, B_G_interp_type, B_G_Pmax);
5577 
5578  HYPRE_AMSSetAlphaAMGCoarseRelaxType(ams, B_Pi_coarse_relax_type);
5579  HYPRE_AMSSetBetaAMGCoarseRelaxType(ams, B_G_coarse_relax_type);
5580 
5581  ams_data->beta_is_zero = beta_is_zero;
5582 
5583 #else
5584  HYPRE_AMSDestroy(ams);
5585 
5586  MakeSolver(space_dim, ams_cycle_type);
5587 
5588  HYPRE_AMSSetPrintLevel(ams, print_level);
5589  if (singular) { HYPRE_AMSSetBetaPoissonMatrix(ams, NULL); }
5590 
5591  HYPRE_AMSSetDiscreteGradient(ams, *G);
5592  if (x != nullptr)
5593  {
5594  HYPRE_AMSSetCoordinateVectors(ams,
5595  x ? (HYPRE_ParVector)(*x) : nullptr,
5596  y ? (HYPRE_ParVector)(*y) : nullptr,
5597  z ? (HYPRE_ParVector)(*z) : nullptr);
5598  }
5599  else
5600  {
5601  HYPRE_AMSSetInterpolations(ams,
5602  Pi ? (HYPRE_ParCSRMatrix) *Pi : nullptr,
5603  Pix ? (HYPRE_ParCSRMatrix) *Pix : nullptr,
5604  Piy ? (HYPRE_ParCSRMatrix) *Piy : nullptr,
5605  Piz ? (HYPRE_ParCSRMatrix) *Piz : nullptr);
5606  }
5607 #endif
5608 }
5609 
5611 {
5612  const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
5613  MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
5614 
5615  if (A) { ResetAMSPrecond(); }
5616 
5617  // update base classes: Operator, Solver, HypreSolver
5618  height = new_A->Height();
5619  width = new_A->Width();
5620  A = const_cast<HypreParMatrix *>(new_A);
5621 
5622  setup_called = 0;
5623  delete X;
5624  delete B;
5625  B = X = NULL;
5626  auxB.Delete(); auxB.Reset();
5627  auxX.Delete(); auxX.Reset();
5628 }
5629 
5631 {
5632  HYPRE_AMSDestroy(ams);
5633 
5634  delete x;
5635  delete y;
5636  delete z;
5637 
5638  delete G;
5639  delete Pi;
5640  delete Pix;
5641  delete Piy;
5642  delete Piz;
5643 }
5644 
5645 void HypreAMS::SetPrintLevel(int print_lvl)
5646 {
5647  HYPRE_AMSSetPrintLevel(ams, print_lvl);
5648  print_level = print_lvl;
5649 }
5650 
5652 {
5653  Init(face_fespace);
5654 }
5655 
5657  : HypreSolver(&A)
5658 {
5659  Init(face_fespace);
5660 }
5661 
5663  const HypreParMatrix &A, HypreParMatrix *C_, HypreParMatrix *G_,
5665  : HypreSolver(&A),
5666  x(x_), y(y_), z(z_),
5667  G(G_), C(C_),
5668  ND_Pi(NULL), ND_Pix(NULL), ND_Piy(NULL), ND_Piz(NULL),
5669  RT_Pi(NULL), RT_Pix(NULL), RT_Piy(NULL), RT_Piz(NULL)
5670 {
5671  MFEM_ASSERT(C != NULL, "");
5672  MFEM_ASSERT(G != NULL, "");
5673  MFEM_ASSERT(x != NULL, "");
5674  MFEM_ASSERT(y != NULL, "");
5675  MFEM_ASSERT(z != NULL, "");
5676 
5677  MakeSolver();
5678 
5679  HYPRE_ADSSetCoordinateVectors(ads, *x, *y, *z);
5680  HYPRE_ADSSetDiscreteCurl(ads, *C);
5681  HYPRE_ADSSetDiscreteGradient(ads, *G);
5682 }
5683 
5684 void HypreADS::MakeSolver()
5685 {
5686  int rlx_sweeps = 1;
5687  double rlx_weight = 1.0;
5688  double rlx_omega = 1.0;
5689 #if !defined(HYPRE_USING_GPU)
5690  int rlx_type = 2;
5691  int amg_coarsen_type = 10;
5692  int amg_agg_levels = 1;
5693  int amg_rlx_type = 8;
5694  double theta = 0.25;
5695  int amg_interp_type = 6;
5696  int amg_Pmax = 4;
5697 #else
5698  int rlx_type = 1;
5699  int amg_coarsen_type = 8;
5700  int amg_agg_levels = 0;
5701  int amg_rlx_type = 18;
5702  double theta = 0.25;
5703  int amg_interp_type = 6;
5704  int amg_Pmax = 4;
5705 #endif
5706 
5707  HYPRE_ADSCreate(&ads);
5708 
5709  HYPRE_ADSSetTol(ads, 0.0);
5710  HYPRE_ADSSetMaxIter(ads, 1); // use as a preconditioner
5711  HYPRE_ADSSetCycleType(ads, cycle_type);
5712  HYPRE_ADSSetPrintLevel(ads, 1);
5713 
5714  // set additional ADS options
5715  HYPRE_ADSSetSmoothingOptions(ads, rlx_type, rlx_sweeps, rlx_weight, rlx_omega);
5716  HYPRE_ADSSetAMGOptions(ads, amg_coarsen_type, amg_agg_levels, amg_rlx_type,
5717  theta, amg_interp_type, amg_Pmax);
5718  HYPRE_ADSSetAMSOptions(ads, ams_cycle_type, amg_coarsen_type, amg_agg_levels,
5719  amg_rlx_type, theta, amg_interp_type, amg_Pmax);
5720 
5721  // The ADS preconditioner requires inverting singular matrices with BoomerAMG,
5722  // which are handled correctly in hypre's Solve method, but can produce hypre
5723  // errors in the Setup (specifically in the l1 row norm computation). See the
5724  // documentation of SetErrorMode() for more details.
5726 }
5727 
// Build the discrete curl (C), discrete gradient (G), vertex coordinate
// vectors (x, y, z) and, for high-order or curved meshes, the ND/RT
// interpolation matrices, and attach all of them to the hypre ADS object.
// The auxiliary H1 and ND spaces are created locally and deleted at the end,
// which is why the assembled matrices copy the relevant row/col starts.
void HypreADS::MakeDiscreteMatrices(ParFiniteElementSpace *face_fespace)
{
   const FiniteElementCollection *face_fec = face_fespace->FEColl();
   // trace spaces (RT_Trace) need trace-space auxiliaries and interpolators
   bool trace_space =
      (dynamic_cast<const RT_Trace_FECollection*>(face_fec) != NULL);

   MFEM_VERIFY(!face_fespace->IsVariableOrder(), "");
   int p = face_fec->GetOrder();

   // define the nodal and edge finite element spaces associated with face_fespace
   ParMesh *pmesh = (ParMesh *) face_fespace->GetMesh();
   FiniteElementCollection *vert_fec, *edge_fec;
   if (trace_space)
   {
      vert_fec = new H1_Trace_FECollection(p, 3);
      edge_fec = new ND_Trace_FECollection(p, 3);
   }
   else
   {
      vert_fec = new H1_FECollection(p, 3);
      edge_fec = new ND_FECollection(p, 3);
   }

   ParFiniteElementSpace *vert_fespace = new ParFiniteElementSpace(pmesh,
                                                                   vert_fec);
   ParFiniteElementSpace *edge_fespace = new ParFiniteElementSpace(pmesh,
                                                                   edge_fec);

   // generate and set the vertex coordinates; only possible directly for
   // lowest order on straight meshes (otherwise interpolation matrices are
   // passed to ADS below instead of coordinates)
   if (p == 1 && pmesh->GetNodes() == NULL)
   {
      ParGridFunction x_coord(vert_fespace);
      ParGridFunction y_coord(vert_fespace);
      ParGridFunction z_coord(vert_fespace);
      double *coord;
      for (int i = 0; i < pmesh->GetNV(); i++)
      {
         coord = pmesh -> GetVertex(i);
         x_coord(i) = coord[0];
         y_coord(i) = coord[1];
         z_coord(i) = coord[2];
      }
      x = x_coord.ParallelProject();
      y = y_coord.ParallelProject();
      z = z_coord.ParallelProject();
      // make sure the vectors are accessible in hypre's memory space
      x->HypreReadWrite();
      y->HypreReadWrite();
      z->HypreReadWrite();
      HYPRE_ADSSetCoordinateVectors(ads, *x, *y, *z);
   }
   else
   {
      x = NULL;
      y = NULL;
      z = NULL;
   }

   // generate and set the discrete curl
   ParDiscreteLinearOperator *curl;
   curl = new ParDiscreteLinearOperator(edge_fespace, face_fespace);
   if (trace_space)
   {
      curl->AddTraceFaceInterpolator(new CurlInterpolator);
   }
   else
   {
      curl->AddDomainInterpolator(new CurlInterpolator);
   }
   curl->Assemble();
   curl->Finalize();
   C = curl->ParallelAssemble();
   C->CopyColStarts(); // since we'll delete edge_fespace
   HYPRE_ADSSetDiscreteCurl(ads, *C);
   delete curl;

   // generate and set the discrete gradient
   ParDiscreteLinearOperator *grad;
   grad = new ParDiscreteLinearOperator(vert_fespace, edge_fespace);
   if (trace_space)
   {
      grad->AddTraceFaceInterpolator(new GradientInterpolator);
   }
   else
   {
      grad->AddDomainInterpolator(new GradientInterpolator);
   }
   grad->Assemble();
   grad->Finalize();
   G = grad->ParallelAssemble();
   G->CopyColStarts(); // since we'll delete vert_fespace
   G->CopyRowStarts(); // since we'll delete edge_fespace
   HYPRE_ADSSetDiscreteGradient(ads, *G);
   delete grad;

   // generate and set the Nedelec and Raviart-Thomas interpolation matrices
   RT_Pi = RT_Pix = RT_Piy = RT_Piz = NULL;
   ND_Pi = ND_Pix = ND_Piy = ND_Piz = NULL;
   if (p > 1 || pmesh->GetNodes() != NULL)
   {
      // vector-valued (3-component) nodal space for the identity interpolators
      ParFiniteElementSpace *vert_fespace_d
         = new ParFiniteElementSpace(pmesh, vert_fec, 3, Ordering::byVDIM);

      // Nedelec interpolation: either the monolithic Pi (cycle types < 10)
      // or its x/y/z blocks (cycle types >= 10)
      ParDiscreteLinearOperator *id_ND;
      id_ND = new ParDiscreteLinearOperator(vert_fespace_d, edge_fespace);
      if (trace_space)
      {
         id_ND->AddTraceFaceInterpolator(new IdentityInterpolator);
      }
      else
      {
         id_ND->AddDomainInterpolator(new IdentityInterpolator);
      }
      id_ND->Assemble();
      id_ND->Finalize();

      if (ams_cycle_type < 10)
      {
         ND_Pi = id_ND->ParallelAssemble();
         ND_Pi->CopyColStarts(); // since we'll delete vert_fespace_d
         ND_Pi->CopyRowStarts(); // since we'll delete edge_fespace
      }
      else
      {
         Array2D<HypreParMatrix *> ND_Pi_blocks;
         id_ND->GetParBlocks(ND_Pi_blocks);
         ND_Pix = ND_Pi_blocks(0,0);
         ND_Piy = ND_Pi_blocks(0,1);
         ND_Piz = ND_Pi_blocks(0,2);
      }

      delete id_ND;

      // Raviart-Thomas interpolation, analogous to the ND case above
      ParDiscreteLinearOperator *id_RT;
      id_RT = new ParDiscreteLinearOperator(vert_fespace_d, face_fespace);
      if (trace_space)
      {
         id_RT->AddTraceFaceInterpolator(new NormalInterpolator);
      }
      else
      {
         id_RT->AddDomainInterpolator(new IdentityInterpolator);
      }
      id_RT->Assemble();
      id_RT->Finalize();

      if (cycle_type < 10)
      {
         RT_Pi = id_RT->ParallelAssemble();
         RT_Pi->CopyColStarts(); // since we'll delete vert_fespace_d
      }
      else
      {
         Array2D<HypreParMatrix *> RT_Pi_blocks;
         id_RT->GetParBlocks(RT_Pi_blocks);
         RT_Pix = RT_Pi_blocks(0,0);
         RT_Piy = RT_Pi_blocks(0,1);
         RT_Piz = RT_Pi_blocks(0,2);
      }

      delete id_RT;

      // pass whichever interpolations were built (NULL for the others)
      HYPRE_ParCSRMatrix HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz;
      HY_RT_Pi  = (RT_Pi)  ? (HYPRE_ParCSRMatrix) *RT_Pi  : NULL;
      HY_RT_Pix = (RT_Pix) ? (HYPRE_ParCSRMatrix) *RT_Pix : NULL;
      HY_RT_Piy = (RT_Piy) ? (HYPRE_ParCSRMatrix) *RT_Piy : NULL;
      HY_RT_Piz = (RT_Piz) ? (HYPRE_ParCSRMatrix) *RT_Piz : NULL;
      HYPRE_ParCSRMatrix HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz;
      HY_ND_Pi  = (ND_Pi)  ? (HYPRE_ParCSRMatrix) *ND_Pi  : NULL;
      HY_ND_Pix = (ND_Pix) ? (HYPRE_ParCSRMatrix) *ND_Pix : NULL;
      HY_ND_Piy = (ND_Piy) ? (HYPRE_ParCSRMatrix) *ND_Piy : NULL;
      HY_ND_Piz = (ND_Piz) ? (HYPRE_ParCSRMatrix) *ND_Piz : NULL;
      HYPRE_ADSSetInterpolations(ads,
                                 HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz,
                                 HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz);

      delete vert_fespace_d;
   }

   delete vert_fec;
   delete vert_fespace;
   delete edge_fec;
   delete edge_fespace;
}
5911 
// Shared constructor helper. The order matters: MakeSolver() creates the
// hypre ADS object on which MakeDiscreteMatrices() then calls HYPRE_ADSSet*.
void HypreADS::Init(ParFiniteElementSpace *face_fespace)
{
   MakeSolver();
   MakeDiscreteMatrices(face_fespace);
}
5917 
5918 void HypreADS::ResetADSPrecond()
5919 {
5920  HYPRE_ADSDestroy(ads);
5921 
5922  MakeSolver();
5923 
5924  HYPRE_ADSSetPrintLevel(ads, print_level);
5925 
5926  HYPRE_ADSSetDiscreteCurl(ads, *C);
5927  HYPRE_ADSSetDiscreteGradient(ads, *G);
5928  if (x != nullptr)
5929  {
5930  MFEM_VERIFY(x && y && z, "");
5931  HYPRE_ADSSetCoordinateVectors(ads, *x, *y, *z);
5932  }
5933  else
5934  {
5935  HYPRE_ParCSRMatrix HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz;
5936  HY_RT_Pi = (RT_Pi) ? (HYPRE_ParCSRMatrix) *RT_Pi : NULL;
5937  HY_RT_Pix = (RT_Pix) ? (HYPRE_ParCSRMatrix) *RT_Pix : NULL;
5938  HY_RT_Piy = (RT_Piy) ? (HYPRE_ParCSRMatrix) *RT_Piy : NULL;
5939  HY_RT_Piz = (RT_Piz) ? (HYPRE_ParCSRMatrix) *RT_Piz : NULL;
5940  HYPRE_ParCSRMatrix HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz;
5941  HY_ND_Pi = (ND_Pi) ? (HYPRE_ParCSRMatrix) *ND_Pi : NULL;
5942  HY_ND_Pix = (ND_Pix) ? (HYPRE_ParCSRMatrix) *ND_Pix : NULL;
5943  HY_ND_Piy = (ND_Piy) ? (HYPRE_ParCSRMatrix) *ND_Piy : NULL;
5944  HY_ND_Piz = (ND_Piz) ? (HYPRE_ParCSRMatrix) *ND_Piz : NULL;
5945  HYPRE_ADSSetInterpolations(ads,
5946  HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz,
5947  HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz);
5948  }
5949 }
5950 
5952 {
5953  const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
5954  MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
5955 
5956  if (A) { ResetADSPrecond(); }
5957 
5958  // update base classes: Operator, Solver, HypreSolver
5959  height = new_A->Height();
5960  width = new_A->Width();
5961  A = const_cast<HypreParMatrix *>(new_A);
5962 
5963  setup_called = 0;
5964  delete X;
5965  delete B;
5966  B = X = NULL;
5967  auxB.Delete(); auxB.Reset();
5968  auxX.Delete(); auxX.Reset();
5969 }
5970 
5972 {
5973  HYPRE_ADSDestroy(ads);
5974 
5975  delete x;
5976  delete y;
5977  delete z;
5978 
5979  delete G;
5980  delete C;
5981 
5982  delete RT_Pi;
5983  delete RT_Pix;
5984  delete RT_Piy;
5985  delete RT_Piz;
5986 
5987  delete ND_Pi;
5988  delete ND_Pix;
5989  delete ND_Piy;
5990  delete ND_Piz;
5991 }
5992 
5993 void HypreADS::SetPrintLevel(int print_lvl)
5994 {
5995  HYPRE_ADSSetPrintLevel(ads, print_lvl);
5996  print_level = print_lvl;
5997 }
5998 
// Create a block of n hypre vectors with the same parallel layout as v and
// wrap each one in an MFEM HypreParVector for convenient access.
HypreLOBPCG::HypreMultiVector::HypreMultiVector(int n, HypreParVector & v,
                                                mv_InterfaceInterpreter & interpreter)
   : hpv(NULL),
     nv(n)
{
   mv_ptr = mv_MultiVectorCreateFromSampleVector(&interpreter, nv,
                                                 (HYPRE_ParVector)v);

   // extract the raw hypre vectors from the multivector ...
   HYPRE_ParVector* vecs = NULL;
   {
      mv_TempMultiVector* tmp =
         (mv_TempMultiVector*)mv_MultiVectorGetData(mv_ptr);
      vecs = (HYPRE_ParVector*)(tmp -> vector);
   }

   // ... and wrap each of them as a HypreParVector
   hpv = new HypreParVector*[nv];
   for (int i=0; i<nv; i++)
   {
      hpv[i] = new HypreParVector(vecs[i]);
   }
}
6020 
6021 HypreLOBPCG::HypreMultiVector::~HypreMultiVector()
6022 {
6023  if ( hpv != NULL )
6024  {
6025  for (int i=0; i<nv; i++)
6026  {
6027  delete hpv[i];
6028  }
6029  delete [] hpv;
6030  }
6031 
6032  mv_MultiVectorDestroy(mv_ptr);
6033 }
6034 
// Fill all vectors in the block with random values derived from seed_.
void
HypreLOBPCG::HypreMultiVector::Randomize(HYPRE_Int seed_)
{
   mv_MultiVectorSetRandom(mv_ptr, seed_);
}

// Access the i-th wrapped vector; i must be smaller than the block size nv.
HypreParVector &
HypreLOBPCG::HypreMultiVector::GetVector(unsigned int i)
{
   MFEM_ASSERT((int)i < nv, "index out of range");

   return ( *hpv[i] );
}
6048 
// Transfer ownership of the wrapped vectors to the caller: the returned
// array and its entries must be deleted by the caller. The underlying hypre
// multivector gives up ownership of its vectors as well.
HypreParVector **
HypreLOBPCG::HypreMultiVector::StealVectors()
{
   HypreParVector ** hpv_ret = hpv;

   hpv = NULL;

   mv_TempMultiVector * mv_tmp =
      (mv_TempMultiVector*)mv_MultiVectorGetData(mv_ptr);

   // the hypre multivector no longer owns the underlying vectors
   mv_tmp->ownsVectors = 0;

   // the HypreParVector wrappers now own the hypre vectors
   for (int i=0; i<nv; i++)
   {
      hpv_ret[i]->SetOwnership(1);
   }

   return hpv_ret;
}
6068 
6070  : comm(c),
6071  myid(0),
6072  numProcs(1),
6073  nev(10),
6074  seed(75),
6075  glbSize(-1),
6076  part(NULL),
6077  multi_vec(NULL),
6078  x(NULL),
6079  subSpaceProj(NULL)
6080 {
6081  MPI_Comm_size(comm,&numProcs);
6082  MPI_Comm_rank(comm,&myid);
6083 
6084  HYPRE_ParCSRSetupInterpreter(&interpreter);
6085  HYPRE_ParCSRSetupMatvec(&matvec_fn);
6086  HYPRE_LOBPCGCreate(&interpreter, &matvec_fn, &lobpcg_solver);
6087 }
6088 
6090 {
6091  delete multi_vec;
6092  delete x;
6093  delete [] part;
6094 
6095  HYPRE_LOBPCGDestroy(lobpcg_solver);
6096 }
6097 
6098 void
6100 {
6101  HYPRE_LOBPCGSetTol(lobpcg_solver, tol);
6102 }
6103 
6104 void
6105 HypreLOBPCG::SetRelTol(double rel_tol)
6106 {
6107 #if MFEM_HYPRE_VERSION >= 21101
6108  HYPRE_LOBPCGSetRTol(lobpcg_solver, rel_tol);
6109 #else
6110  MFEM_ABORT("This method requires HYPRE version >= 2.11.1");
6111 #endif
6112 }
6113 
6114 void
6116 {
6117  HYPRE_LOBPCGSetMaxIter(lobpcg_solver, max_iter);
6118 }
6119 
6120 void
6122 {
6123  if (myid == 0)
6124  {
6125  HYPRE_LOBPCGSetPrintLevel(lobpcg_solver, logging);
6126  }
6127 }
6128 
6129 void
6131 {
6132  HYPRE_LOBPCGSetPrecondUsageMode(lobpcg_solver, pcg_mode);
6133 }
6134 
6135 void
6137 {
6138  HYPRE_LOBPCGSetPrecond(lobpcg_solver,
6139  (HYPRE_PtrToSolverFcn)this->PrecondSolve,
6140  (HYPRE_PtrToSolverFcn)this->PrecondSetup,
6141  (HYPRE_Solver)&precond);
6142 }
6143 
6144 void
6146 {
6147  HYPRE_BigInt locSize = A.Width();
6148 
6149  if (HYPRE_AssumedPartitionCheck())
6150  {
6151  part = new HYPRE_BigInt[2];
6152 
6153  MPI_Scan(&locSize, &part[1], 1, HYPRE_MPI_BIG_INT, MPI_SUM, comm);
6154 
6155  part[0] = part[1] - locSize;
6156 
6157  MPI_Allreduce(&locSize, &glbSize, 1, HYPRE_MPI_BIG_INT, MPI_SUM, comm);
6158  }
6159  else
6160  {
6161  part = new HYPRE_BigInt[numProcs+1];
6162 
6163  MPI_Allgather(&locSize, 1, HYPRE_MPI_BIG_INT,
6164  &part[1], 1, HYPRE_MPI_BIG_INT, comm);
6165 
6166  part[0] = 0;
6167  for (int i=0; i<numProcs; i++)
6168  {
6169  part[i+1] += part[i];
6170  }
6171 
6172  glbSize = part[numProcs];
6173  }
6174 
6175  if ( x != NULL )
6176  {
6177  delete x;
6178  }
6179 
6180  // Create a distributed vector without a data array.
6181  const bool is_device_ptr = true;
6182  x = new HypreParVector(comm,glbSize,NULL,part,is_device_ptr);
6183 
6184  matvec_fn.MatvecCreate = this->OperatorMatvecCreate;
6185  matvec_fn.Matvec = this->OperatorMatvec;
6186  matvec_fn.MatvecDestroy = this->OperatorMatvecDestroy;
6187 
6188  HYPRE_LOBPCGSetup(lobpcg_solver,(HYPRE_Matrix)&A,NULL,NULL);
6189 }
6190 
6191 void
6193 {
6194  matvec_fn.MatvecCreate = this->OperatorMatvecCreate;
6195  matvec_fn.Matvec = this->OperatorMatvec;
6196  matvec_fn.MatvecDestroy = this->OperatorMatvecDestroy;
6197 
6198  HYPRE_LOBPCGSetupB(lobpcg_solver,(HYPRE_Matrix)&M,NULL);
6199 }
6200 
6201 void
6203 {
6204  // Initialize eigenvalues array with marker values
6205  eigs.SetSize(nev);
6206 
6207  for (int i=0; i<nev; i++)
6208  {
6209  eigs[i] = eigenvalues[i];
6210  }
6211 }
6212 
6213 const HypreParVector &
6214 HypreLOBPCG::GetEigenvector(unsigned int i) const
6215 {
6216  return multi_vec->GetVector(i);
6217 }
6218 
6219 void
6221 {
6222  // Initialize HypreMultiVector object if necessary
6223  if ( multi_vec == NULL )
6224  {
6225  MFEM_ASSERT(x != NULL, "In HypreLOBPCG::SetInitialVectors()");
6226 
6227  multi_vec = new HypreMultiVector(nev, *x, interpreter);
6228  }
6229 
6230  // Copy the vectors provided
6231  for (int i=0; i < min(num_vecs,nev); i++)
6232  {
6233  multi_vec->GetVector(i) = *vecs[i];
6234  }
6235 
6236  // Randomize any remaining vectors
6237  for (int i=min(num_vecs,nev); i < nev; i++)
6238  {
6239  multi_vec->GetVector(i).Randomize(seed);
6240  }
6241 
6242  // Ensure all vectors are in the proper subspace
6243  if ( subSpaceProj != NULL )
6244  {
6245  HypreParVector y(*x);
6246  y = multi_vec->GetVector(0);
6247 
6248  for (int i=1; i<nev; i++)
6249  {
6250  subSpaceProj->Mult(multi_vec->GetVector(i),
6251  multi_vec->GetVector(i-1));
6252  }
6253  subSpaceProj->Mult(y,
6254  multi_vec->GetVector(nev-1));
6255  }
6256 }
6257 
6258 void
6260 {
6261  // Initialize HypreMultiVector object if necessary
6262  if ( multi_vec == NULL )
6263  {
6264  MFEM_ASSERT(x != NULL, "In HypreLOBPCG::Solve()");
6265 
6266  multi_vec = new HypreMultiVector(nev, *x, interpreter);
6267  multi_vec->Randomize(seed);
6268 
6269  if ( subSpaceProj != NULL )
6270  {
6271  HypreParVector y(*x);
6272  y = multi_vec->GetVector(0);
6273 
6274  for (int i=1; i<nev; i++)
6275  {
6276  subSpaceProj->Mult(multi_vec->GetVector(i),
6277  multi_vec->GetVector(i-1));
6278  }
6279  subSpaceProj->Mult(y, multi_vec->GetVector(nev-1));
6280  }
6281  }
6282 
6283  eigenvalues.SetSize(nev);
6284  eigenvalues = NAN;
6285 
6286  // Perform eigenmode calculation
6287  //
6288  // The eigenvalues are computed in ascending order (internally the
6289  // order is determined by the LAPACK routine 'dsydv'.)
6290  HYPRE_LOBPCGSolve(lobpcg_solver, NULL, *multi_vec, eigenvalues);
6291 }
6292 
// hypre matvec callback: no per-matvec workspace is needed, so return NULL.
void *
HypreLOBPCG::OperatorMatvecCreate( void *A,
                                   void *x )
{
   void *matvec_data;

   matvec_data = NULL;

   return ( matvec_data );
}

// hypre matvec callback: y = A*x, wrapping the raw hypre vectors as MFEM
// HypreParVectors and dispatching to the Operator's Mult(). Only the
// combination alpha == 1, beta == 0 is supported.
HYPRE_Int
HypreLOBPCG::OperatorMatvec( void *matvec_data,
                             HYPRE_Complex alpha,
                             void *A,
                             void *x,
                             HYPRE_Complex beta,
                             void *y )
{
   MFEM_VERIFY(alpha == 1.0 && beta == 0.0, "values not supported");

   Operator *Aop = (Operator*)A;

   hypre_ParVector * xPar = (hypre_ParVector *)x;
   hypre_ParVector * yPar = (hypre_ParVector *)y;

   HypreParVector xVec(xPar);
   HypreParVector yVec(yPar);

   Aop->Mult( xVec, yVec );

   // Move data back to hypre's device memory location in case the above Mult
   // operation moved it to host.
   yVec.HypreReadWrite();

   return 0;
}

// hypre matvec callback: nothing to clean up (no workspace was allocated).
HYPRE_Int
HypreLOBPCG::OperatorMatvecDestroy( void *matvec_data )
{
   return 0;
}

// hypre preconditioner callback: x = PC(b), dispatching to the MFEM Solver
// registered in SetPreconditioner().
HYPRE_Int
HypreLOBPCG::PrecondSolve(void *solver,
                          void *A,
                          void *b,
                          void *x)
{
   Solver *PC = (Solver*)solver;

   hypre_ParVector * bPar = (hypre_ParVector *)b;
   hypre_ParVector * xPar = (hypre_ParVector *)x;

   HypreParVector bVec(bPar);
   HypreParVector xVec(xPar);

   PC->Mult( bVec, xVec );

   // Move data back to hypre's device memory location in case the above Mult
   // operation moved it to host.
   xVec.HypreReadWrite();

   return 0;
}

// hypre preconditioner setup callback: the MFEM Solver is assumed to be
// already set up, so this is a no-op.
HYPRE_Int
HypreLOBPCG::PrecondSetup(void *solver,
                          void *A,
                          void *b,
                          void *x)
{
   return 0;
}
6368 
// Construct an AME eigensolver on the given MPI communicator with default
// settings (10 eigenpairs, silent output).
HypreAME::HypreAME(MPI_Comm comm)
   : myid(0),
     numProcs(1),
     nev(10),
     setT(false),
     ams_precond(NULL),
     eigenvalues(NULL),
     multi_vec(NULL),
     eigenvectors(NULL)
{
   MPI_Comm_size(comm,&numProcs);
   MPI_Comm_rank(comm,&myid);

   HYPRE_AMECreate(&ame_solver);
   HYPRE_AMESetPrintLevel(ame_solver, 0);
}
6385 
6387 {
6388  if ( multi_vec )
6389  {
6390  mfem_hypre_TFree_host(multi_vec);
6391  }
6392 
6393  if ( eigenvectors )
6394  {
6395  for (int i=0; i<nev; i++)
6396  {
6397  delete eigenvectors[i];
6398  }
6399  }
6400  delete [] eigenvectors;
6401 
6402  if ( eigenvalues )
6403  {
6404  mfem_hypre_TFree_host(eigenvalues);
6405  }
6406 
6407  HYPRE_AMEDestroy(ame_solver);
6408 }
6409 
6410 void
6412 {
6413  nev = num_eigs;
6414 
6415  HYPRE_AMESetBlockSize(ame_solver, nev);
6416 }
6417 
6418 void
6419 HypreAME::SetTol(double tol)
6420 {
6421  HYPRE_AMESetTol(ame_solver, tol);
6422 }
6423 
6424 void
6425 HypreAME::SetRelTol(double rel_tol)
6426 {
6427 #if MFEM_HYPRE_VERSION >= 21101
6428  HYPRE_AMESetRTol(ame_solver, rel_tol);
6429 #else
6430  MFEM_ABORT("This method requires HYPRE version >= 2.11.1");
6431 #endif
6432 }
6433 
6434 void
6436 {
6437  HYPRE_AMESetMaxIter(ame_solver, max_iter);
6438 }
6439 
6440 void
6442 {
6443  if (myid == 0)
6444  {
6445  HYPRE_AMESetPrintLevel(ame_solver, logging);
6446  }
6447 }
6448 
6449 void
6451 {
6452  ams_precond = &precond;
6453 }
6454 
6455 void
6457 {
6458  if ( !setT )
6459  {
6460  HYPRE_Solver ams_precond_ptr = (HYPRE_Solver)*ams_precond;
6461 
6462  ams_precond->SetupFcn()(*ams_precond,A,NULL,NULL);
6463 
6464  HYPRE_AMESetAMSSolver(ame_solver, ams_precond_ptr);
6465  }
6466 
6467  HYPRE_AMESetup(ame_solver);
6468 }
6469 
6470 void
6472 {
6473  HYPRE_ParCSRMatrix parcsr_M = M;
6474  HYPRE_AMESetMassMatrix(ame_solver,(HYPRE_ParCSRMatrix)parcsr_M);
6475 }
6476 
6477 void
6479 {
6480  HYPRE_AMESolve(ame_solver);
6481 
6482  // Grab a pointer to the eigenvalues from AME
6483  HYPRE_AMEGetEigenvalues(ame_solver,&eigenvalues);
6484 
6485  // Grad a pointer to the eigenvectors from AME
6486  HYPRE_AMEGetEigenvectors(ame_solver,&multi_vec);
6487 }
6488 
6489 void
6491 {
6492  // Initialize eigenvalues array with marker values
6493  eigs.SetSize(nev); eigs = -1.0;
6494 
6495  // Copy eigenvalues to eigs array
6496  for (int i=0; i<nev; i++)
6497  {
6498  eigs[i] = eigenvalues[i];
6499  }
6500 }
6501 
6502 void
6503 HypreAME::createDummyVectors() const
6504 {
6505  eigenvectors = new HypreParVector*[nev];
6506  for (int i=0; i<nev; i++)
6507  {
6508  eigenvectors[i] = new HypreParVector(multi_vec[i]);
6509  eigenvectors[i]->SetOwnership(1);
6510  }
6511 }
6512 
6513 const HypreParVector &
6514 HypreAME::GetEigenvector(unsigned int i) const
6515 {
6516  if ( eigenvectors == NULL )
6517  {
6518  this->createDummyVectors();
6519  }
6520 
6521  return *eigenvectors[i];
6522 }
6523 
6524 HypreParVector **
6526 {
6527  if ( eigenvectors == NULL )
6528  {
6529  this->createDummyVectors();
6530  }
6531 
6532  // Set the local pointers to NULL so that they won't be deleted later
6533  HypreParVector ** vecs = eigenvectors;
6534  eigenvectors = NULL;
6535  multi_vec = NULL;
6536 
6537  return vecs;
6538 }
6539 
6540 }
6541 
6542 #endif
MemoryType GetHypreMemoryType()
The MemoryType used by MFEM when allocating arrays for Hypre objects.
Definition: hypre.hpp:149
virtual ~HypreBoomerAMG()
Definition: hypre.cpp:5234
void SetAbsTol(double atol)
Definition: hypre.cpp:3995
void GetEigenvalues(Array< double > &eigenvalues) const
Collect the converged eigenvalues.
Definition: hypre.cpp:6202
Hash function for data sequences.
Definition: hash.hpp:456
Memory< double > auxX
Definition: hypre.hpp:981
void SetTol(double tol)
Definition: hypre.cpp:3990
void SetPreconditioner(HypreSolver &precond)
Set the hypre solver to be used as a preconditioner.
Definition: hypre.cpp:4205
void SetType(HYPRE_Int ilu_type)
Definition: hypre.cpp:4746
virtual void Mult(const HypreParVector &b, HypreParVector &x) const
Solve Ax=b with hypre&#39;s GMRES.
Definition: hypre.cpp:4215
void EliminateRowsCols(const Array< int > &rows_cols, const HypreParVector &X, HypreParVector &B)
Definition: hypre.cpp:2276
HypreADS(ParFiniteElementSpace *face_fespace)
Definition: hypre.cpp:5651
HypreParMatrix * ExtractSubmatrix(const Array< int > &indices, double threshold=0.0) const
Definition: hypre.cpp:1630
const T * Read(MemoryClass mc, int size) const
Get read-only access to the memory with the given MemoryClass.
virtual int NumNonZeroElems() const
Returns the number of the nonzero elements in the matrix.
Definition: sparsemat.cpp:1588
static constexpr Type default_type
Definition: hypre.hpp:1043
HypreEuclid(MPI_Comm comm)
Definition: hypre.cpp:4601
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Definition: hypre.cpp:4325
void EliminateBC(const HypreParMatrix &A, const HypreParMatrix &Ae, const Array< int > &ess_dof_list, const Vector &X, Vector &B)
Eliminate essential BC specified by ess_dof_list from the solution X to the r.h.s. B.
Definition: hypre.cpp:3258
HypreParVector * NewTrueDofVector()
Definition: pfespace.hpp:327
HypreParVector * X0
FIR Filter Temporary Vectors.
Definition: hypre.hpp:985
void Clear()
Clear the contents of the SparseMatrix.
Definition: sparsemat.hpp:209
double min_eig_est
Minimal eigenvalue estimate for polynomial smoothing.
Definition: hypre.hpp:1017
Memory< T > & GetMemory()
Return a reference to the Memory object used by the Array.
Definition: array.hpp:120
Memory< double > data
Definition: vector.hpp:62
static MemoryClass GetHostMemoryClass()
Get the current Host MemoryClass. This is the MemoryClass used by most MFEM host Memory objects...
Definition: device.hpp:268
Memory< HYPRE_Int > I
void SetPrintLevel(int print_lvl)
Definition: hypre.cpp:5993
void SetRowScale(int row_scale)
Definition: hypre.cpp:4652
void SetMassMatrix(const HypreParMatrix &M)
Definition: hypre.cpp:6471
virtual ~HypreEuclid()
Definition: hypre.cpp:4691
void WrapMemoryWrite(Memory< double > &mem)
Replace the HypreParVector&#39;s data with the given Memory, mem, and prepare the vector for write access...
Definition: hypre.cpp:369
int setup_called
Was hypre&#39;s Setup function called already?
Definition: hypre.hpp:1123
Memory< double > data
void SetLogging(int logging)
Definition: hypre.cpp:4585
int Dimension() const
Dimension of the reference space used within the elements.
Definition: mesh.hpp:1020
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Definition: hypre.cpp:3968
void SetPreconditioner(HypreSolver &precond)
Set the hypre solver to be used as a preconditioner.
Definition: hypre.cpp:4372
const HypreParVector & GetEigenvector(unsigned int i) const
Extract a single eigenvector.
Definition: hypre.cpp:6214
void MakeRef(const HypreParMatrix &master)
Make this HypreParMatrix a reference to &#39;master&#39;.
Definition: hypre.cpp:1335
void Read_IJMatrix(MPI_Comm comm, const char *fname)
Read a matrix saved as a HYPRE_IJMatrix.
Definition: hypre.cpp:2553
bool CanShallowCopy(const Memory< T > &src, MemoryClass mc)
Return true if the src Memory can be used with the MemoryClass mc.
Definition: hypre.cpp:108
void SetSize(int s)
Resize the vector to size s.
Definition: vector.hpp:517
HypreParMatrix & Add(const double beta, const HypreParMatrix &B)
Definition: hypre.hpp:763
HypreParVector * B
Right-hand side and solution vector.
Definition: hypre.hpp:1118
void Delete()
Delete the owned pointers and reset the Memory object.
double window_params[3]
Parameters for windowing function of FIR filter.
Definition: hypre.hpp:1019
virtual const double * HostRead() const
Shortcut for mfem::Read(vec.GetMemory(), vec.Size(), false).
Definition: vector.hpp:457
virtual void Mult(const HypreParVector &b, HypreParVector &x) const
Solve Ax=b with hypre&#39;s PCG.
Definition: hypre.cpp:4038
MemoryType GetHostMemoryType() const
Return the host MemoryType of the Memory object.
void Mult(const Table &A, const Table &B, Table &C)
C = A * B (as boolean matrices)
Definition: table.cpp:475
void GetEigenvalues(Array< double > &eigenvalues) const
Collect the converged eigenvalues.
Definition: hypre.cpp:6490
void SetInitialVectors(int num_vecs, HypreParVector **vecs)
Definition: hypre.cpp:6220
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Definition: hypre.cpp:4153
void SetWindowByName(const char *window_name)
Convenience function for setting canonical windowing parameters.
Definition: hypre.cpp:3457
Biwise-OR of all HIP backends.
Definition: device.hpp:90
int Width() const
Get the width (size of input) of the Operator. Synonym with NumCols().
Definition: operator.hpp:72
T * GetData()
Returns the data.
Definition: array.hpp:115
Issue warnings on hypre errors.
Definition: hypre.hpp:1109
int Size() const
Returns the size of the vector.
Definition: vector.hpp:197
T * Write(Memory< T > &mem, int size, bool on_dev=true)
Get a pointer for write access to mem with the mfem::Device&#39;s DeviceMemoryClass, if on_dev = true...
Definition: device.hpp:336
std::string GetHash() const
Return the hash string for the current sequence and reset (clear) the sequence.
Definition: hash.cpp:60
void SetPreconditioner(HypreSolver &precond)
Definition: hypre.cpp:6450
virtual double * HostWrite()
Shortcut for mfem::Write(vec.GetMemory(), vec.Size(), false).
Definition: vector.hpp:465
HyprePCG(MPI_Comm comm)
Definition: hypre.cpp:3950
void SetTol(double tol)
Definition: hypre.cpp:6419
HypreILU()
Constructor; sets the default options.
Definition: hypre.cpp:4698
int GetNumCols() const
Returns the number of columns in the diagonal block of the ParCSRMatrix.
Definition: hypre.hpp:628
T * Write(MemoryClass mc, int size)
Get write-only access to the memory with the given MemoryClass.
HypreAMS(ParFiniteElementSpace *edge_fespace)
Construct the AMS solver on the given edge finite element space.
Definition: hypre.cpp:5244
void SetKDim(int dim)
Definition: hypre.cpp:4357
virtual ~HypreGMRES()
Definition: hypre.cpp:4287
int NumRows() const
Get the number of rows (size of output) of the Operator. Synonym with Height().
Definition: operator.hpp:69
void SetPrintLevel(int print_lvl)
Definition: hypre.cpp:4367
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Definition: hypre.cpp:5951
void SetPreconditioner(Solver &precond)
Definition: hypre.cpp:6136
void ResetTranspose() const
Reset (destroy) the internal transpose matrix that is created by EnsureMultTranspose() and MultTransp...
Definition: hypre.cpp:1717
virtual void Mult(const Vector &x, Vector &y) const =0
Operator application: y=A(x).
Memory< HYPRE_Int > & GetDiagMemoryJ()
Definition: hypre.hpp:871
void SetMassMatrix(Operator &M)
Definition: hypre.cpp:6192
Abstract parallel finite element space.
Definition: pfespace.hpp:28
HypreParVector()
Default constructor, no underlying hypre_ParVector is created.
Definition: hypre.hpp:177
Vector beta
void SetPrintLevel(int logging)
Definition: hypre.cpp:6121
bool iterative_mode
If true, use the second argument of Mult() as an initial guess.
Definition: operator.hpp:686
STL namespace.
void CopyFrom(const Memory &src, int size)
Copy size entries from src to *this.
virtual ~HypreAMS()
Definition: hypre.cpp:5630
void SetTol(double tol)
Definition: hypre.cpp:4175
void HostRead() const
Update the internal hypre_ParCSRMatrix object, A, to be on host.
Definition: hypre.hpp:837
virtual ~HypreILU()
Definition: hypre.cpp:4790
void SetAdvectiveOptions(int distance=15, const std::string &prerelax="", const std::string &postrelax="FFC")
Definition: hypre.cpp:5126
HYPRE_BigInt N() const
Returns the global number of columns.
Definition: hypre.hpp:585
void BlockInverseScale(const HypreParMatrix *A, HypreParMatrix *C, const Vector *b, HypreParVector *d, int blocksize, BlockInverseScaleJob job)
Definition: hypre.cpp:2766
void HostWrite()
Update the internal hypre_ParCSRMatrix object, A, to be on host.
Definition: hypre.hpp:848
virtual void Mult(const HypreParVector &b, HypreParVector &x) const
Relax the linear system Ax=b.
Definition: hypre.cpp:3631
bool pos_l1_norms
If set, take absolute values of the computed l1_norms.
Definition: hypre.hpp:1011
HypreParMatrix * ParMult(const HypreParMatrix *A, const HypreParMatrix *B, bool own_matrix)
Definition: hypre.cpp:2853
virtual void Mult(const HypreParVector &b, HypreParVector &x) const
Solve the linear system Ax=b.
Definition: hypre.cpp:3901
void SetPrintLevel(int print_lvl)
Definition: hypre.cpp:4200
void ScaleRows(const Vector &s)
Scale the local row i by s(i).
Definition: hypre.cpp:2017
void SetParams(double threshold, int max_levels)
Definition: hypre.cpp:4565
double poly_fraction
Fraction of spectrum to smooth for polynomial relaxation.
Definition: hypre.hpp:999
Memory< double > & GetMemoryData()
Definition: sparsemat.hpp:269
Memory< double > & GetMemory()
Return a reference to the Memory object used by the Vector.
Definition: vector.hpp:228
void SetPrintLevel(int print_lvl)
Definition: hypre.cpp:4010
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Definition: hypre.cpp:4771
Memory< int > & GetMemoryI()
Definition: sparsemat.hpp:237
int poly_scale
Apply the polynomial smoother to A or D^{-1/2} A D^{-1/2}.
Definition: hypre.hpp:1001
void MergeDiagAndOffd(SparseMatrix &merged)
Get a single SparseMatrix containing all rows from this processor, merged from the diagonal and off-d...
Definition: hypre.cpp:1563
HashFunction & AppendDoubles(const double *doubles, size_t num_doubles)
Add a sequence of doubles for hashing, given as a c-array.
Definition: hash.hpp:509
void SetTol(double tol)
Definition: hypre.cpp:6099
HypreParVector * X1
Definition: hypre.hpp:985
void SetMaxIter(HYPRE_Int max_iter)
Definition: hypre.cpp:4751
HypreFGMRES(MPI_Comm comm)
Definition: hypre.cpp:4293
Memory< int > & GetMemoryJ()
Definition: sparsemat.hpp:253
void SetWindowParameters(double a, double b, double c)
Set parameters for windowing function for FIR smoother.
Definition: hypre.cpp:3472
void SetLoadBal(double loadbal)
Definition: hypre.cpp:4575
std::function< double(const Vector &)> f(double mass_coeff)
Definition: lor_mms.hpp:30
HypreGMRES(MPI_Comm comm)
Definition: hypre.cpp:4121
Vector & operator=(const double *v)
Copy Size() entries from v.
Definition: vector.cpp:119
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Definition: hypre.cpp:4541
void SetReuse(int reuse)
Definition: hypre.cpp:4580
struct ::_p_PC * PC
Definition: petsc.hpp:72
bool MemoryClassContainsType(MemoryClass mc, MemoryType mt)
Return true iff the MemoryType mt is contained in the MemoryClass mc.
Definition: mem_manager.cpp:75
HypreLOBPCG(MPI_Comm comm)
Definition: hypre.cpp:6069
void Add(const DenseMatrix &A, const DenseMatrix &B, double alpha, DenseMatrix &C)
C = A + alpha*B.
Definition: densemat.cpp:2320
void EliminateBC(const HypreParMatrix &Ae, const Array< int > &ess_dof_list, const Vector &X, Vector &B) const
Eliminate essential BC specified by ess_dof_list from the solution X to the r.h.s. B.
Definition: hypre.cpp:2328
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Definition: hypre.cpp:4459
int NumRows() const
Definition: array.hpp:377
void SetSymmetry(int sym)
Definition: hypre.cpp:4590
void HypreStealOwnership(HypreParMatrix &A_hyp, SparseMatrix &A_diag)
Make A_hyp steal ownership of its diagonal part A_diag.
Definition: hypre.cpp:2744
virtual ~HypreParaSails()
Definition: hypre.cpp:4595
void SetDevicePtrOwner(bool own) const
Set/clear the ownership flag for the device pointer. Ownership indicates whether the pointer will be ...
void SetMaxIter(int max_iter)
Definition: hypre.cpp:6115
bool Empty() const
Return true if the Memory object is empty, see Reset().
double * l1_norms
l1 norms of the rows of A
Definition: hypre.hpp:1009
Data type sparse matrix.
Definition: sparsemat.hpp:50
BlockInverseScaleJob
Definition: hypre.hpp:915
void SetOwnerFlags(signed char diag, signed char offd, signed char colmap)
Explicitly set the three ownership flags, see docs for diagOwner etc.
Definition: hypre.cpp:1372
virtual void Setup(const HypreParVector &b, HypreParVector &x) const
Set up the solver (if not set up already, also called automatically by HypreSolver::Mult).
Definition: hypre.cpp:3874
void SetLogging(int logging)
Definition: hypre.cpp:4005
virtual ~HypreSolver()
Definition: hypre.cpp:3941
virtual HYPRE_PtrToParSolverFcn SolveFcn() const =0
hypre's internal Solve function
virtual double * Write(bool on_dev=true)
Shortcut for mfem::Write(vec.GetMemory(), vec.Size(), on_dev).
Definition: vector.hpp:461
void SetRelTol(double rel_tol)
Definition: hypre.cpp:6425
void SetKDim(int dim)
Definition: hypre.cpp:4190
void mfem_error(const char *msg)
Function called when an error is encountered. Used by the macros MFEM_ABORT, MFEM_ASSERT, MFEM_VERIFY.
Definition: error.cpp:154
void SetResidualConvergenceOptions(int res_frequency=-1, double rtol=0.0)
Definition: hypre.cpp:4025
double b
Definition: lissajous.cpp:42
int to_int(const std::string &str)
Convert a string to an int.
Definition: text.hpp:62
Abort on hypre errors (default in base class)
Definition: hypre.hpp:1110
void SetLogging(int logging)
Definition: hypre.cpp:4362
void Print(const char *fname, HYPRE_Int offi=0, HYPRE_Int offj=0) const
Prints the locally owned rows in parallel.
Definition: hypre.cpp:2530
ParMesh * GetParMesh() const
Definition: pfespace.hpp:273
HypreParaSails(MPI_Comm comm)
Definition: hypre.cpp:4477
void HypreWrite()
Prepare the HypreParVector for write access in hypre's device memory space, HYPRE_MEMORY_DEVICE.
Definition: hypre.cpp:331
void SetTol(double tol)
Definition: hypre.cpp:4347
void WrapHypreParVector(hypre_ParVector *y, bool owner=true)
Converts hypre's format to HypreParVector.
Definition: hypre.cpp:255
void AbsMultTranspose(double a, const Vector &x, double b, Vector &y) const
Computes y = a * |At| * x + b * y, using entry-wise absolute values of the transpose of the matrix A...
Definition: hypre.cpp:1898
bool WrapVectors(const Vector &b, Vector &x) const
Makes the internal HypreParVectors B and X wrap the input vectors b and x.
Definition: hypre.cpp:3823
void Read(MPI_Comm comm, const char *fname)
Reads a HypreParVector from files saved with HypreParVector::Print.
Definition: hypre.cpp:393
HypreParMatrix * LeftDiagMult(const SparseMatrix &D, HYPRE_BigInt *row_starts=NULL) const
Multiply the HypreParMatrix on the left by a block-diagonal parallel matrix D and return the result a...
Definition: hypre.cpp:1915
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Definition: hypre.cpp:5610
HypreParVector ** StealEigenvectors()
Transfer ownership of the converged eigenvectors.
Definition: hypre.cpp:6525
const HypreParMatrix * A
The linear system matrix.
Definition: hypre.hpp:1115
HYPRE_Int Randomize(HYPRE_Int seed)
Set random values.
Definition: hypre.cpp:383
virtual void Mult(const HypreParVector &b, HypreParVector &x) const
Solve Ax=b with hypre's FGMRES.
Definition: hypre.cpp:4381
virtual void MultTranspose(const Vector &b, Vector &x) const
Apply transpose of the smoother to relax the linear system Ax=b.
Definition: hypre.cpp:3772
Ignore hypre errors (see e.g. HypreADS)
Definition: hypre.hpp:1108
double relax_weight
Damping coefficient (usually <= 1)
Definition: hypre.hpp:993
HYPRE_BigInt * GetColStarts() const
Return the parallel column partitioning array.
Definition: hypre.hpp:650
Memory< HYPRE_Int > J
void SetElasticityOptions(ParFiniteElementSpace *fespace)
Definition: hypre.cpp:5078
Memory< double > auxB
Definition: hypre.hpp:1120
void Sort()
Sorts the array in ascending order. This requires operator< to be defined for T.
Definition: array.hpp:251
int Capacity() const
Return the size of the allocated memory.
void SetBJ(int bj)
Definition: hypre.cpp:4647
void GetDiag(Vector &diag) const
Get the local diagonal of the matrix.
Definition: hypre.cpp:1481
void RAP(const DenseMatrix &A, const DenseMatrix &P, DenseMatrix &RAP)
Definition: densemat.cpp:3232
void SetData(double *d)
Definition: vector.hpp:147
const T * Read(const Memory< T > &mem, int size, bool on_dev=true)
Get a pointer for read access to mem with the mfem::Device's DeviceMemoryClass, if on_dev = true...
Definition: device.hpp:319
const T * HostRead() const
Shortcut for mfem::Read(a.GetMemory(), a.Size(), false).
Definition: array.hpp:311
void SetLevelOfFill(HYPRE_Int lev_fill)
Set the fill level for ILU(k); the default is k=1.
Definition: hypre.cpp:4741
void Reset()
Reset the memory to be empty, ensuring that Delete() will be a no-op.
HypreParMatrix * A
The linear system matrix.
Definition: hypre.hpp:975
HYPRE_BigInt GlobalTrueVSize() const
Definition: pfespace.hpp:281
Bitwise-OR of all CUDA backends.
Definition: device.hpp:88
Dynamic 2D array using row-major layout.
Definition: array.hpp:354
virtual void SetOperator(const Operator &op)
Definition: hypre.cpp:3479
virtual HYPRE_PtrToParSolverFcn SetupFcn() const =0
hypre's internal Setup function
void SetLogging(int logging)
Definition: hypre.cpp:4195
virtual ~HypreADS()
Definition: hypre.cpp:5971
HypreParMatrix()
An empty matrix to be used as a reference to an existing matrix.
Definition: hypre.cpp:602
bool A_is_symmetric
A flag that indicates whether the linear system matrix A is symmetric.
Definition: hypre.hpp:1025
void EnsureMultTranspose() const
Ensure the action of the transpose is performed fast.
Definition: hypre.cpp:1707
void SetMaxIter(int max_iter)
Definition: hypre.cpp:4000
int ParCSRRelax_FIR(hypre_ParCSRMatrix *A, hypre_ParVector *f, double max_eig, int poly_order, double *fir_coeffs, hypre_ParVector *u, hypre_ParVector *x0, hypre_ParVector *x1, hypre_ParVector *x2, hypre_ParVector *x3)
Definition: hypre.cpp:3305
void SetMaxIter(int max_iter)
Definition: hypre.cpp:4352
void SetPolyOptions(int poly_order, double poly_fraction, int eig_est_cg_iter=10)
Set parameters for polynomial smoothing.
Definition: hypre.cpp:3441
void SetSOROptions(double relax_weight, double omega)
Set SOR-related parameters.
Definition: hypre.cpp:3435
MPI_Comm GetComm() const
MPI communicator.
Definition: hypre.hpp:534
T * HostWrite(Memory< T > &mem, int size)
Shortcut to Write(const Memory<T> &mem, int size, false)
Definition: device.hpp:343
void CopyMemory(Memory< T > &src, Memory< T > &dst, MemoryClass dst_mc, bool dst_owner)
Shallow or deep copy src to dst with the goal to make the array src accessible through dst with the M...
Definition: hypre.cpp:475
Wrapper for hypre's parallel vector class.
Definition: hypre.hpp:161
HypreParMatrix * EliminateCols(const Array< int > &cols)
Definition: hypre.cpp:2301
HypreParVector * X
Definition: hypre.hpp:1118
int ParCSRRelax_Taubin(hypre_ParCSRMatrix *A, hypre_ParVector *f, double lambda, double mu, int N, double max_eig, hypre_ParVector *u, hypre_ParVector *r)
Definition: hypre.cpp:3268
static MemoryType GetHostMemoryType()
Get the current Host MemoryType. This is the MemoryType used by most MFEM classes when allocating mem...
Definition: device.hpp:264
void forall(int N, lambda &&body)
Definition: forall.hpp:742
void PrintHash(std::ostream &out) const
Print sizes and hashes for all data arrays of the HypreParMatrix from the local MPI rank...
Definition: hypre.cpp:2611
OutStream out(std::cout)
Global stream used by the library for standard output. Initially it uses the same std::streambuf as s...
Definition: globals.hpp:66
MemoryType
Memory types supported by MFEM.
Definition: mem_manager.hpp:31
void HypreRead() const
Prepare the HypreParVector for read access in hypre's device memory space, HYPRE_MEMORY_DEVICE.
Definition: hypre.cpp:312
void SetPrintLevel(int print_lvl)
Definition: hypre.cpp:5645
Memory< double > auxX
Definition: hypre.hpp:1120
void PrintCommPkg(std::ostream &out=mfem::out) const
Print information about the hypre_ParCSRCommPkg of the HypreParMatrix.
Definition: hypre.cpp:2574
HypreParVector * X
Definition: hypre.hpp:977
signed char OwnsColMap() const
Get colmap ownership flag.
Definition: hypre.hpp:553
void SetHostPtrOwner(bool own) const
Set/clear the ownership flag for the host pointer. Ownership indicates whether the pointer will be de...
void SetSize(int nsize)
Change the logical size of the array, keep existing entries.
Definition: array.hpp:687
void Solve()
Solve the eigenproblem.
Definition: hypre.cpp:6478
HYPRE_Int HYPRE_BigInt
void SetData(double *data_)
Sets the data of the Vector and the hypre_ParVector to data_.
Definition: hypre.cpp:306
void SetNumModes(int num_eigs)
Definition: hypre.cpp:6411
void SetStats(int stats)
Definition: hypre.cpp:4637
int Height() const
Get the height (size of output) of the Operator. Synonym with NumRows().
Definition: operator.hpp:66
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Definition: hypre.hpp:1158
HypreParMatrix * ParAdd(const HypreParMatrix *A, const HypreParMatrix *B)
Returns the matrix A + B.
Definition: hypre.cpp:2813
double max_eig_est
Maximal eigenvalue estimate for polynomial smoothing.
Definition: hypre.hpp:1015
static bool Allows(unsigned long b_mask)
Return true if any of the backends in the backend mask, b_mask, are allowed.
Definition: device.hpp:258
int NumCols() const
Get the number of columns (size of input) of the Operator. Synonym with Width().
Definition: operator.hpp:75
A class to initialize the size of a Tensor.
Definition: dtensor.hpp:54
void SetDataAndSize(double *d, int s)
Set the Vector data and size.
Definition: vector.hpp:154
void SetRelTol(double rel_tol)
Definition: hypre.cpp:6105
HashFunction & AppendInts(const int_type *ints, size_t num_ints)
Add a sequence of integers for hashing, given as a c-array.
Definition: hash.hpp:489
HypreAME(MPI_Comm comm)
Definition: hypre.cpp:6369
double Norml1() const
Returns the l_1 norm of the vector.
Definition: vector.cpp:864
void MakeAlias(const Memory &base, int offset, int size)
Create a memory object that points inside the memory object base.
HypreParMatrix * Transpose() const
Returns the transpose of *this.
Definition: hypre.cpp:1611
void GetBlocks(Array2D< HypreParMatrix *> &blocks, bool interleaved_rows=false, bool interleaved_cols=false) const
Definition: hypre.cpp:1587
int height
Dimension of the output / number of rows in the matrix.
Definition: operator.hpp:27
void SetMaxIter(int max_iter)
Definition: hypre.cpp:6435
void SetFIRCoefficients(double max_eig)
Compute window and Chebyshev coefficients for given polynomial order.
Definition: hypre.cpp:3591
double a
Definition: lissajous.cpp:41
void EliminateRows(const Array< int > &rows)
Eliminate rows from the diagonal and off-diagonal blocks of the matrix.
Definition: hypre.cpp:2315
double * fir_coeffs
Combined coefficients for windowing and Chebyshev polynomials.
Definition: hypre.hpp:1022
void MakeDataOwner() const
Set the Vector data (host pointer) ownership flag.
Definition: vector.hpp:185
void SetLevel(int level)
Definition: hypre.cpp:4632
void Threshold(double threshold=0.0)
Remove values smaller in absolute value than some threshold.
Definition: hypre.cpp:2145
const T * HostRead(const Memory< T > &mem, int size)
Shortcut to Read(const Memory<T> &mem, int size, false)
Definition: device.hpp:326
double InnerProduct(HypreParVector *x, HypreParVector *y)
Definition: hypre.cpp:414
virtual ~HypreSmoother()
Definition: hypre.cpp:3782
void SetPrintLevel(HYPRE_Int print_level)
Set the print level: 0 = none, 1 = setup, 2 = solve, 3 = setup+solve.
Definition: hypre.cpp:4766
Host memory; using new[] and delete[].
constexpr MemoryClass GetHypreMemoryClass()
The MemoryClass used by Hypre objects.
Definition: hypre.hpp:137
void Print(const char *fname) const
Prints the locally owned rows in parallel.
Definition: hypre.cpp:388
void HypreReadWrite()
Prepare the HypreParVector for read and write access in hypre's device memory space, HYPRE_MEMORY_DEVICE.
Definition: hypre.cpp:322
Memory< HYPRE_Int > & GetDiagMemoryI()
Definition: hypre.hpp:870
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Definition: hypre.cpp:4938
void New(int size)
Allocate host memory for size entries with the current host memory type returned by MemoryManager::Ge...
void InvScaleRows(const Vector &s)
Scale the local row i by 1./s(i)
Definition: hypre.cpp:2056
int Size() const
Returns the number of TYPE I elements.
Definition: table.hpp:92
void SetTaubinOptions(double lambda, double mu, int iter)
Set parameters for Taubin's lambda-mu method.
Definition: hypre.cpp:3449
HypreParVector * B
Right-hand side and solution vectors.
Definition: hypre.hpp:977
MemoryType GetDeviceMemoryType() const
Return the device MemoryType of the Memory object. If the device MemoryType is not set...
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Definition: hypre.cpp:4667
double mu
Definition: ex25.cpp:140
int NumCols() const
Definition: array.hpp:378
A simple singleton class for hypre's global settings, that 1) calls HYPRE_Init() and sets some GPU-re...
Definition: hypre.hpp:72
void ClearOwnerFlags() const
Clear the ownership flags for the host and device pointers, as well as any internal data allocated by...
T * ReadWrite(MemoryClass mc, int size)
Get read-write access to the memory with the given MemoryClass.
DiagonalPolicy
Defines operator diagonal policy upon elimination of rows and/or columns.
Definition: operator.hpp:47
int dim
Definition: ex24.cpp:53
MemoryType GetMemoryType(MemoryClass mc)
Return a suitable MemoryType for a given MemoryClass.
Definition: mem_manager.cpp:60
virtual ~HyprePCG()
Definition: hypre.cpp:4115
void SetOperator(Operator &A)
Definition: hypre.cpp:6145
void SetPreconditioner(HypreSolver &precond)
Set the hypre solver to be used as a preconditioner.
Definition: hypre.cpp:4015
void Destroy()
Destroy a vector.
Definition: vector.hpp:594
void SetLocalReordering(HYPRE_Int reorder_type)
Definition: hypre.cpp:4761
void HypreRead() const
Update the internal hypre_ParCSRMatrix object, A, to be in hypre memory space.
Definition: hypre.hpp:854
int relax_times
Number of relaxation sweeps.
Definition: hypre.hpp:991
Class used by MFEM to store pointers to host and/or device memory.
void operator*=(double s)
Scale all entries by s: A_scaled = s*A.
Definition: hypre.cpp:2102
void SetPrintLevel(int logging)
Definition: hypre.cpp:6441
double infinity()
Define a shortcut for std::numeric_limits<double>::infinity()
Definition: vector.hpp:44
double ParNormlp(const Vector &vec, double p, MPI_Comm comm)
Compute the l_p norm of the Vector which is split without overlap across the given communicator...
Definition: hypre.cpp:425
int eig_est_cg_iter
Number of CG iterations to determine eigenvalue estimates.
Definition: hypre.hpp:1013
void SetOwnership(int own)
Sets ownership of the internal hypre_ParVector.
Definition: hypre.hpp:251
double Norml2() const
Returns the l2 norm of the vector.
Definition: vector.cpp:835
Abstract class for hypre's solvers and preconditioners.
Definition: hypre.hpp:1102
int Size_of_connections() const
Definition: table.hpp:98
ErrorMode error_mode
How to treat hypre errors.
Definition: hypre.hpp:1126
void AbsMult(double a, const Vector &x, double b, Vector &y) const
Computes y = a * |A| * x + b * y, using entry-wise absolute values of the matrix A.
Definition: hypre.cpp:1881
bool OwnsHostPtr() const
Return true if the host pointer is owned. Ownership indicates whether the pointer will be deleted by ...
void SetMemory(int mem)
Definition: hypre.cpp:4642
int Size() const
Return the logical size of the array.
Definition: array.hpp:141
RefCoord t[3]
HypreParVector & operator=(double d)
Set constant values.
Definition: hypre.cpp:274
MPI_Comm GetComm() const
Definition: pfespace.hpp:269
const double alpha
Definition: ex15.cpp:369
Vector data type.
Definition: vector.hpp:58
void SetAbsTol(double tol)
Definition: hypre.cpp:4180
void delete_hypre_ParCSRMatrixColMapOffd(hypre_ParCSRMatrix *A)
Definition: hypre.cpp:2677
void CopyConvertMemory(Memory< SrcT > &src, MemoryClass dst_mc, Memory< DstT > &dst)
Deep copy and convert src to dst with the goal to make the array src accessible through dst with the ...
Definition: hypre.cpp:510
hypre_ParCSRMatrix * StealData()
Changes the ownership of the matrix.
Definition: hypre.cpp:1353
void SetPrecondUsageMode(int pcg_mode)
Definition: hypre.cpp:6130
void SetOperator(const HypreParMatrix &A)
Definition: hypre.cpp:6456
void SetSystemsOptions(int dim, bool order_bynodes=false)
Definition: hypre.cpp:4957
void SetFilter(double filter)
Definition: hypre.cpp:4570
void DropSmallEntries(double tol)
Wrapper for hypre_ParCSRMatrixDropSmallEntries in different versions of hypre. Drop off-diagonal entr...
Definition: hypre.cpp:2230
double sigma(const Vector &x)
Definition: maxwell.cpp:102
void SetMaxIter(int max_iter)
Definition: hypre.cpp:4185
RefCoord s[3]
HYPRE_BigInt GetGlobalNumCols() const
Return the global number of columns.
Definition: hypre.hpp:639
Vector * GlobalVector() const
Returns the global vector in each processor.
Definition: hypre.cpp:264
void GetOffd(SparseMatrix &offd, HYPRE_BigInt *&cmap) const
Get the local off-diagonal block. NOTE: 'offd' will not own any data.
Definition: hypre.cpp:1557
double omega
SOR parameter (usually in (0,2))
Definition: hypre.hpp:995
HYPRE_BigInt M() const
Returns the global number of rows.
Definition: hypre.hpp:583
signed char OwnsOffd() const
Get offd ownership flag.
Definition: hypre.hpp:551
double u(const Vector &xvec)
Definition: lor_mms.hpp:22
Base class for solvers.
Definition: operator.hpp:682
HYPRE_Int MultTranspose(HypreParVector &x, HypreParVector &y, double alpha=1.0, double beta=0.0) const
Computes y = alpha * A^t * x + beta * y.
Definition: hypre.cpp:1872
void WrapHypreParCSRMatrix(hypre_ParCSRMatrix *a, bool owner=true)
Converts hypre's format to HypreParMatrix.
Definition: hypre.cpp:608
Abstract operator.
Definition: operator.hpp:24
Wrapper for hypre's ParCSR matrix class.
Definition: hypre.hpp:343
void HostReadWrite()
Update the internal hypre_ParCSRMatrix object, A, to be on host.
Definition: hypre.hpp:843
Memory< double > auxB
Auxiliary buffers for the case when the input or output arrays in methods like Mult(const Vector &...
Definition: hypre.hpp:981
void Swap(SparseMatrix &other)
Definition: sparsemat.cpp:4182
~HypreParVector()
Calls hypre's destroy function.
Definition: hypre.cpp:405
HypreParMatrix * HypreParMatrixFromBlocks(Array2D< HypreParMatrix *> &blocks, Array2D< double > *blockCoeff)
Returns a merged hypre matrix constructed from hypre matrix blocks.
Definition: hypre.cpp:3020
Memory< double > & GetDiagMemoryData()
Definition: hypre.hpp:872
HypreParVector * Z
Definition: hypre.hpp:983
void Solve()
Solve the eigenproblem.
Definition: hypre.cpp:6259
HYPRE_BigInt * GetTrueDofOffsets() const
Definition: pfespace.hpp:278
virtual ~HypreFGMRES()
Definition: hypre.cpp:4453
HypreParVector * V
Temporary vectors.
Definition: hypre.hpp:983
double Normlinf() const
Returns the l_infinity norm of the vector.
Definition: vector.cpp:853
void SetType(HypreSmoother::Type type, int relax_times=1)
Set the relaxation type and number of sweeps.
Definition: hypre.cpp:3429
void GatherBlockOffsetData(MPI_Comm comm, const int rank, const int nprocs, const int num_loc, const Array< int > &offsets, std::vector< int > &all_num_loc, const int numBlocks, std::vector< std::vector< HYPRE_BigInt >> &blockProcOffsets, std::vector< HYPRE_BigInt > &procOffsets, std::vector< std::vector< int >> &procBlockOffsets, HYPRE_BigInt &firstLocal, HYPRE_BigInt &globalNum)
Definition: hypre.cpp:2965
virtual double * HostReadWrite()
Shortcut for mfem::ReadWrite(vec.GetMemory(), vec.Size(), false).
Definition: vector.hpp:473
MemoryClass
Memory classes identify sets of memory types.
Definition: mem_manager.hpp:73
HYPRE_BigInt GetGlobalNumRows() const
Return the global number of rows.
Definition: hypre.hpp:635
void WrapMemoryReadWrite(Memory< double > &mem)
Replace the HypreParVector&#39;s data with the given Memory, mem, and prepare the vector for read and wri...
Definition: hypre.cpp:355
HYPRE_Int Mult(HypreParVector &x, HypreParVector &y, double alpha=1.0, double beta=0.0) const
Computes y = alpha * A * x + beta * y.
Definition: hypre.cpp:1736
int width
Dimension of the input / number of columns in the matrix.
Definition: operator.hpp:28
HypreParVector CreateCompatibleVector() const
Constructs a HypreParVector compatible with the calling vector.
Definition: hypre.cpp:238
int poly_order
Order of the smoothing polynomial.
Definition: hypre.hpp:997
void SetTol(HYPRE_Real tol)
Definition: hypre.cpp:4756
int GetNumRows() const
Returns the number of rows in the diagonal block of the ParCSRMatrix.
Definition: hypre.hpp:621
double lambda
Taubin&#39;s lambda-mu method parameters.
Definition: hypre.hpp:1004
void WrapMemoryRead(const Memory< double > &mem)
Replace the HypreParVector&#39;s data with the given Memory, mem, and prepare the vector for read access ...
Definition: hypre.cpp:340
const HypreParVector & GetEigenvector(unsigned int i) const
Extract a single eigenvector.
Definition: hypre.cpp:6514
void HypreReadWrite()
Update the internal hypre_ParCSRMatrix object, A, to be in hypre memory space.
Definition: hypre.hpp:861
HYPRE_BigInt * GetRowStarts() const
Return the parallel row partitioning array.
Definition: hypre.hpp:645