MFEM v4.8.0
Finite element discretization library
ginkgo.cpp
1// Copyright (c) 2010-2025, Lawrence Livermore National Security, LLC. Produced
2// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3// LICENSE and NOTICE for details. LLNL-CODE-806117.
4//
5// This file is part of the MFEM library. For more information and source code
6// availability visit https://mfem.org.
7//
8// MFEM is free software; you can redistribute it and/or modify it under the
9// terms of the BSD-3 license. We welcome feedback and contributions, see file
10// CONTRIBUTING.md for details.
11
12#include "../config/config.hpp"
13
14#ifdef MFEM_USE_GINKGO
15
16#include "ginkgo.hpp"
17#include "sparsemat.hpp"
19#include "../general/error.hpp"
20#include <iostream>
21#include <iomanip>
22#include <algorithm>
23#include <cmath>
24#include <cstring>
25
26namespace mfem
27{
28
29namespace Ginkgo
30{
31
32// Create a GinkgoExecutor of type exec_type.
33GinkgoExecutor::GinkgoExecutor(ExecType exec_type)
34{
35#if defined(MFEM_USE_CUDA) || defined(MFEM_USE_HIP)
36 gko::version_info gko_version = gko::version_info::get();
37 bool gko_with_omp_support = (strcmp(gko_version.omp_version.tag,
38 "not compiled") != 0);
39#endif
40 switch (exec_type)
41 {
42 case GinkgoExecutor::REFERENCE:
43 {
44 executor = gko::ReferenceExecutor::create();
45 break;
46 }
47 case GinkgoExecutor::OMP:
48 {
49 executor = gko::OmpExecutor::create();
50 break;
51 }
52 case GinkgoExecutor::CUDA:
53 {
54 if (gko::CudaExecutor::get_num_devices() > 0)
55 {
56#ifdef MFEM_USE_CUDA
57 int current_device = 0;
58 MFEM_GPU_CHECK(cudaGetDevice(&current_device));
59 if (gko_with_omp_support)
60 {
61 executor = gko::CudaExecutor::create(current_device,
62 gko::OmpExecutor::create());
63 }
64 else
65 {
66 executor = gko::CudaExecutor::create(current_device,
67 gko::ReferenceExecutor::create());
68 }
69#endif
70 }
71 else
72 {
73 MFEM_ABORT("gko::CudaExecutor::get_num_devices() did not report "
74 "any valid devices.");
75 }
76 break;
77 }
78 case GinkgoExecutor::HIP:
79 {
80 if (gko::HipExecutor::get_num_devices() > 0)
81 {
82#ifdef MFEM_USE_HIP
83 int current_device = 0;
84 MFEM_GPU_CHECK(hipGetDevice(&current_device));
85 if (gko_with_omp_support)
86 {
87 executor = gko::HipExecutor::create(current_device,
88 gko::OmpExecutor::create());
89 }
90 else
91 {
92 executor = gko::HipExecutor::create(current_device,
93 gko::ReferenceExecutor::create());
94 }
95#endif
96 }
97 else
98 {
99 MFEM_ABORT("gko::HipExecutor::get_num_devices() did not report "
100 "any valid devices.");
101 }
102 break;
103 }
104 default:
105 MFEM_ABORT("Invalid ExecType specified");
106 }
107}
108
109// Create a GinkgoExecutor of type exec_type, with host_exec_type for the
110// related CPU Executor (only applicable to GPU backends).
111GinkgoExecutor::GinkgoExecutor(ExecType exec_type, ExecType host_exec_type)
112{
113 switch (exec_type)
114 {
115 case GinkgoExecutor::REFERENCE:
116 {
117 MFEM_WARNING("Parameter host_exec_type ignored for CPU GinkgoExecutor.");
118 executor = gko::ReferenceExecutor::create();
119 break;
120 }
121 case GinkgoExecutor::OMP:
122 {
123 MFEM_WARNING("Parameter host_exec_type ignored for CPU GinkgoExecutor.");
124 executor = gko::OmpExecutor::create();
125 break;
126 }
127 case GinkgoExecutor::CUDA:
128 {
129 if (gko::CudaExecutor::get_num_devices() > 0)
130 {
131#ifdef MFEM_USE_CUDA
132 int current_device = 0;
133 MFEM_GPU_CHECK(cudaGetDevice(&current_device));
134 if (host_exec_type == GinkgoExecutor::OMP)
135 {
136 executor = gko::CudaExecutor::create(current_device,
137 gko::OmpExecutor::create());
138 }
139 else
140 {
141 executor = gko::CudaExecutor::create(current_device,
142 gko::ReferenceExecutor::create());
143 }
144#endif
145 }
146 else
147 {
148 MFEM_ABORT("gko::CudaExecutor::get_num_devices() did not report "
149 "any valid devices.");
150 }
151 break;
152 }
153 case GinkgoExecutor::HIP:
154 {
155 if (gko::HipExecutor::get_num_devices() > 0)
156 {
157#ifdef MFEM_USE_HIP
158 int current_device = 0;
159 MFEM_GPU_CHECK(hipGetDevice(&current_device));
160 if (host_exec_type == GinkgoExecutor::OMP)
161 {
162 executor = gko::HipExecutor::create(current_device,
163 gko::OmpExecutor::create());
164 }
165 else
166 {
167 executor = gko::HipExecutor::create(current_device,
168 gko::ReferenceExecutor::create());
169 }
170#endif
171 }
172 else
173 {
174 MFEM_ABORT("gko::HipExecutor::get_num_devices() did not report "
175 "any valid devices.");
176 }
177 break;
178 }
179 default:
180 MFEM_ABORT("Invalid ExecType specified");
181 }
182}
183
184// Create a GinkgoExecutor to match MFEM's device configuration.
185GinkgoExecutor::GinkgoExecutor(Device &mfem_device)
186{
187 gko::version_info gko_version = gko::version_info::get();
188 bool gko_with_omp_support = (strcmp(gko_version.omp_version.tag,
189 "not compiled") != 0);
190 if (mfem_device.Allows(Backend::CUDA_MASK))
191 {
192 if (gko::CudaExecutor::get_num_devices() > 0)
193 {
194#ifdef MFEM_USE_CUDA
195 int current_device = 0;
196 MFEM_GPU_CHECK(cudaGetDevice(&current_device));
197 if (gko_with_omp_support)
198 {
199 executor = gko::CudaExecutor::create(current_device,
200 gko::OmpExecutor::create());
201 }
202 else
203 {
204 executor = gko::CudaExecutor::create(current_device,
205 gko::ReferenceExecutor::create());
206 }
207#endif
208 }
209 else
210 {
211 MFEM_ABORT("gko::CudaExecutor::get_num_devices() did not report "
212 "any valid devices.");
213 }
214 }
215 else if (mfem_device.Allows(Backend::HIP_MASK))
216 {
217 if (gko::HipExecutor::get_num_devices() > 0)
218 {
219#ifdef MFEM_USE_HIP
220 int current_device = 0;
221 MFEM_GPU_CHECK(hipGetDevice(&current_device));
222 if (gko_with_omp_support)
223 {
224 executor = gko::HipExecutor::create(current_device,
225 gko::OmpExecutor::create());
226 }
227 else
228 {
229 executor = gko::HipExecutor::create(current_device,
230 gko::ReferenceExecutor::create());
231 }
232#endif
233 }
234 else
235 {
236 MFEM_ABORT("gko::HipExecutor::get_num_devices() did not report "
237 "any valid devices.");
238 }
239 }
240 else
241 {
242 if (mfem_device.Allows(Backend::OMP_MASK))
243 {
244 // Also use OpenMP for Ginkgo, if Ginkgo supports it
245 if (gko_with_omp_support)
246 {
247 executor = gko::OmpExecutor::create();
248 }
249 else
250 {
251 executor = gko::ReferenceExecutor::create();
252 }
253 }
254 else
255 {
256 executor = gko::ReferenceExecutor::create();
257 }
258 }
259}
260
261// Create a GinkgoExecutor to match MFEM's device configuration, with
262// a specific host_exec_type for the associated CPU Executor (only
263// applicable to GPU backends).
264GinkgoExecutor::GinkgoExecutor(Device &mfem_device, ExecType host_exec_type)
265{
266
267 if (mfem_device.Allows(Backend::CUDA_MASK))
268 {
269 if (gko::CudaExecutor::get_num_devices() > 0)
270 {
271#ifdef MFEM_USE_CUDA
272 int current_device = 0;
273 MFEM_GPU_CHECK(cudaGetDevice(&current_device));
274 if (host_exec_type == GinkgoExecutor::OMP)
275 {
276 executor = gko::CudaExecutor::create(current_device,
277 gko::OmpExecutor::create());
278 }
279 else
280 {
281 executor = gko::CudaExecutor::create(current_device,
282 gko::ReferenceExecutor::create());
283 }
284#endif
285 }
286 else
287 {
288 MFEM_ABORT("gko::CudaExecutor::get_num_devices() did not report "
289 "any valid devices.");
290 }
291 }
292 else if (mfem_device.Allows(Backend::HIP_MASK))
293 {
294 if (gko::HipExecutor::get_num_devices() > 0)
295 {
296#ifdef MFEM_USE_HIP
297 int current_device = 0;
298 MFEM_GPU_CHECK(hipGetDevice(&current_device));
299 if (host_exec_type == GinkgoExecutor::OMP)
300 {
301 executor = gko::HipExecutor::create(current_device,
302 gko::OmpExecutor::create());
303 }
304 else
305 {
306 executor = gko::HipExecutor::create(current_device,
307 gko::ReferenceExecutor::create());
308 }
309#endif
310 }
311 else
312 {
313 MFEM_ABORT("gko::HipExecutor::get_num_devices() did not report "
314 "any valid devices.");
315 }
316 }
317 else
318 {
319 MFEM_WARNING("Parameter host_exec_type ignored for CPU GinkgoExecutor.");
320 if (mfem_device.Allows(Backend::OMP_MASK))
321 {
322 // Also use OpenMP for Ginkgo, if Ginkgo supports it
323 gko::version_info gko_version = gko::version_info::get();
324 bool gko_with_omp_support = (strcmp(gko_version.omp_version.tag,
325 "not compiled") != 0);
326 if (gko_with_omp_support)
327 {
328 executor = gko::OmpExecutor::create();
329 }
330 else
331 {
332 executor = gko::ReferenceExecutor::create();
333 }
334 }
335 else
336 {
337 executor = gko::ReferenceExecutor::create();
338 }
339 }
340}
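// Example usage: a minimal sketch of constructing executors with the
// constructors above (assumes an MFEM build with a working CUDA device;
// the variable names are illustrative only):
//
//    mfem::Device device("cuda");
//    mfem::Ginkgo::GinkgoExecutor exec(device);        // match MFEM's device
//    mfem::Ginkgo::GinkgoExecutor cpu_exec(
//       mfem::Ginkgo::GinkgoExecutor::REFERENCE);      // explicit CPU executor
//
// Passing the mfem::Device keeps MFEM and Ginkgo on the same GPU, so the
// vector views created in Mult() below can wrap MFEM data without copies.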
341
342GinkgoIterativeSolver::GinkgoIterativeSolver(GinkgoExecutor &exec,
343 bool use_implicit_res_norm)
344 : Solver(),
345 use_implicit_res_norm(use_implicit_res_norm)
346{
347 executor = exec.GetExecutor();
348 print_level = -1;
349
350 // Build default stopping criterion factory
351 max_iter = 10;
352 rel_tol = 0.0;
353 abs_tol = 0.0;
354 this->update_stop_factory();
355
356 needs_wrapped_vecs = false;
357 sub_op_needs_wrapped_vecs = false;
358}
359
360void GinkgoIterativeSolver::update_stop_factory()
361{
362 using ResidualCriterionFactory = gko::stop::ResidualNorm<>;
363 using ImplicitResidualCriterionFactory = gko::stop::ImplicitResidualNorm<>;
364
365 if (use_implicit_res_norm)
366 {
367 imp_rel_criterion = ImplicitResidualCriterionFactory::build()
368 .with_reduction_factor(sqrt(rel_tol))
369 .with_baseline(gko::stop::mode::initial_resnorm)
370 .on(executor);
371 imp_abs_criterion = ImplicitResidualCriterionFactory::build()
372 .with_reduction_factor(sqrt(abs_tol))
373 .with_baseline(gko::stop::mode::absolute)
374 .on(executor);
375 combined_factory =
376 gko::stop::Combined::build()
377 .with_criteria(imp_rel_criterion,
378 imp_abs_criterion,
379 gko::stop::Iteration::build()
380 .with_max_iters(static_cast<unsigned long>(max_iter))
381 .on(executor))
382 .on(executor);
383 }
384 else
385 {
386 rel_criterion = ResidualCriterionFactory::build()
387 .with_reduction_factor(rel_tol)
388 .with_baseline(gko::stop::mode::initial_resnorm)
389 .on(executor);
390 abs_criterion = ResidualCriterionFactory::build()
391 .with_reduction_factor(abs_tol)
392 .with_baseline(gko::stop::mode::absolute)
393 .on(executor);
394 combined_factory =
395 gko::stop::Combined::build()
396 .with_criteria(rel_criterion,
397 abs_criterion,
398 gko::stop::Iteration::build()
399 .with_max_iters(static_cast<unsigned long>(max_iter))
400 .on(executor))
401 .on(executor);
402 }
403}
404
405void
406GinkgoIterativeSolver::initialize_ginkgo_log(gko::matrix::Dense<real_t>* b)
407const
408{
409 // Add the logger object. See the different masks available in Ginkgo's
410 // documentation
411 convergence_logger = gko::log::Convergence<>::create(
412 gko::log::Logger::criterion_check_completed_mask);
413 residual_logger = std::make_shared<ResidualLogger<>>(executor,
414 system_oper.get(),b);
415
416}
417
418void OperatorWrapper::apply_impl(const gko::LinOp *b, gko::LinOp *x) const
419{
420
421 // Cast to VectorWrapper; only accept this type for this impl
422 const VectorWrapper *mfem_b = gko::as<const VectorWrapper>(b);
423 VectorWrapper *mfem_x = gko::as<VectorWrapper>(x);
424
425 this->wrapped_oper->Mult(mfem_b->get_mfem_vec_const_ref(),
426 mfem_x->get_mfem_vec_ref());
427}
428void OperatorWrapper::apply_impl(const gko::LinOp *alpha,
429 const gko::LinOp *b,
430 const gko::LinOp *beta,
431 gko::LinOp *x) const
432{
433 // x = alpha * op (b) + beta * x
434 // Cast to VectorWrapper; only accept this type for this impl
435 const VectorWrapper *mfem_b = gko::as<const VectorWrapper>(b);
436 VectorWrapper *mfem_x = gko::as<VectorWrapper>(x);
437
438 // Check that alpha and beta are Dense<real_t> of size (1,1):
439 if (alpha->get_size()[0] > 1 || alpha->get_size()[1] > 1)
440 {
441 throw gko::BadDimension(
442 __FILE__, __LINE__, __func__, "alpha", alpha->get_size()[0],
443 alpha->get_size()[1],
444 "Expected an object of size [1 x 1] for scaling "
445 " in this operator's apply_impl");
446 }
447 if (beta->get_size()[0] > 1 || beta->get_size()[1] > 1)
448 {
449 throw gko::BadDimension(
450 __FILE__, __LINE__, __func__, "beta", beta->get_size()[0],
451 beta->get_size()[1],
452 "Expected an object of size [1 x 1] for scaling "
453 " in this operator's apply_impl");
454 }
455 real_t alpha_f;
456 real_t beta_f;
457
458 if (alpha->get_executor() == alpha->get_executor()->get_master())
459 {
460 // Access value directly
461 alpha_f = gko::as<gko::matrix::Dense<real_t>>(alpha)->at(0, 0);
462 }
463 else
464 {
465 // Copy from device to host
466 this->get_executor()->get_master().get()->copy_from(
467 this->get_executor().get(),
468 1, gko::as<gko::matrix::Dense<real_t>>(alpha)->get_const_values(),
469 &alpha_f);
470 }
471 if (beta->get_executor() == beta->get_executor()->get_master())
472 {
473 // Access value directly
474 beta_f = gko::as<gko::matrix::Dense<real_t>>(beta)->at(0, 0);
475 }
476 else
477 {
478 // Copy from device to host
479 this->get_executor()->get_master().get()->copy_from(
480 this->get_executor().get(),
481 1, gko::as<gko::matrix::Dense<real_t>>(beta)->get_const_values(),
482 &beta_f);
483 }
484 // Scale x by beta
485 mfem_x->get_mfem_vec_ref() *= beta_f;
486 // Multiply operator with b and store in tmp
487 mfem::Vector mfem_tmp =
488 mfem::Vector(mfem_x->get_size()[0],
489 mfem_x->get_mfem_vec_ref().GetMemory().GetMemoryType());
490 // Set UseDevice flag to match mfem_x (not automatically done through
491 // MemoryType)
492 mfem_tmp.UseDevice(mfem_x->get_mfem_vec_ref().UseDevice());
493
494 // Apply the operator
495 this->wrapped_oper->Mult(mfem_b->get_mfem_vec_const_ref(), mfem_tmp);
496 // Scale tmp by alpha and add
497 mfem_x->get_mfem_vec_ref().Add(alpha_f, mfem_tmp);
498
499 mfem_tmp.Destroy();
500}
501
502void
503GinkgoIterativeSolver::Mult(const Vector &x, Vector &y) const
504{
505
506 MFEM_VERIFY(system_oper, "System matrix or operator not initialized");
507 MFEM_VERIFY(executor, "executor is not initialized");
508 MFEM_VERIFY(y.Size() == x.Size(),
509 "Mismatching sizes for rhs and solution");
510
511 using vec = gko::matrix::Dense<real_t>;
512 if (!iterative_mode)
513 {
514 y = 0.0;
515 }
516
517 // Create x and y vectors in Ginkgo's format. Wrap MFEM's data directly,
518 // on CPU or GPU.
519 bool on_device = false;
520 if (executor != executor->get_master())
521 {
522 on_device = true;
523 }
524 std::unique_ptr<vec> gko_x;
525 std::unique_ptr<vec> gko_y;
526
527 // If we do not have an OperatorWrapper for the system operator or
528 // preconditioner, or have an inner solver using VectorWrappers (as
529 // for IR), then directly create Ginkgo vectors from MFEM's data.
530 if (!needs_wrapped_vecs)
531 {
532 gko_x = vec::create(executor, gko::dim<2>(x.Size(), 1),
533 gko_array<real_t>::view(executor,
534 x.Size(), const_cast<real_t *>(
535 x.Read(on_device))), 1);
536 gko_y = vec::create(executor, gko::dim<2>(y.Size(), 1),
537 gko_array<real_t>::view(executor,
538 y.Size(),
539 y.ReadWrite(on_device)), 1);
540 }
541 else // We have at least one wrapped MFEM operator; need wrapped vectors
542 {
543 gko_x = std::unique_ptr<vec>(
544 new VectorWrapper(executor, x.Size(),
545 const_cast<Vector *>(&x), false));
546 gko_y = std::unique_ptr<vec>(
547 new VectorWrapper(executor, y.Size(), &y,
548 false));
549 }
550
551 // Create the logger object to log some data from the solvers to confirm
552 // convergence.
553 initialize_ginkgo_log(gko_x.get());
554
555 MFEM_VERIFY(convergence_logger, "convergence logger not initialized" );
556 if (print_level==1)
557 {
558 MFEM_VERIFY(residual_logger, "residual logger not initialized" );
559 solver->clear_loggers(); // Clear any loggers from previous Mult() calls
560 solver->add_logger(residual_logger);
561 }
562
563 // Add the convergence logger object to the combined factory to retrieve the
564 // solver and other data
565 combined_factory->clear_loggers();
566 combined_factory->add_logger(convergence_logger);
567
568 // Finally, apply the solver to x and get the solution in y.
569 solver->apply(gko_x, gko_y);
570
571 // Get the number of iterations taken to converge to the solution.
572 final_iter = convergence_logger->get_num_iterations();
573
574 // Some residual norm and convergence print outs.
575 real_t final_res_norm = 0.0;
576
577 // The convergence_logger object contains the residual vector after the
578 // solver has returned. Use this vector to compute the residual norm of the
579 // solution. Get the residual norm from the logger. As the convergence logger
580 // returns a `linop`, it is necessary to convert it to a Dense matrix.
581 // Additionally, if the logger is logging on the gpu, it is necessary to copy
582 // the data to the host, hence the `residual_norm_d_master` copy below.
583 auto residual_norm = convergence_logger->get_residual_norm();
584 auto imp_residual_norm = convergence_logger->get_implicit_sq_resnorm();
585
586 if (use_implicit_res_norm)
587 {
588 auto imp_residual_norm_d =
589 gko::as<gko::matrix::Dense<real_t>>(imp_residual_norm);
590 auto imp_residual_norm_d_master =
591 gko::matrix::Dense<real_t>::create(executor->get_master(),
592 gko::dim<2> {1, 1});
593 imp_residual_norm_d_master->copy_from(imp_residual_norm_d);
594
595 final_res_norm = imp_residual_norm_d_master->at(0,0);
596 }
597 else
598 {
599 auto residual_norm_d =
600 gko::as<gko::matrix::Dense<real_t>>(residual_norm);
601 auto residual_norm_d_master =
602 gko::matrix::Dense<real_t>::create(executor->get_master(),
603 gko::dim<2> {1, 1});
604 residual_norm_d_master->copy_from(residual_norm_d);
605
606 final_res_norm = residual_norm_d_master->at(0,0);
607 }
608
609 converged = 0;
610 if (convergence_logger->has_converged())
611 {
612 converged = 1;
613 }
614
615 if (print_level == 1)
616 {
617 residual_logger->write();
618 }
619 if (converged == 0)
620 {
621 mfem::err << "No convergence!" << '\n';
622 }
623 if (print_level >=2 && converged==1 )
624 {
625 mfem::out << "Converged in " << final_iter <<
626 " iterations with final residual norm "
627 << final_res_norm << '\n';
628 }
629}
630
631void GinkgoIterativeSolver::SetOperator(const Operator &op)
632{
633
634 if (system_oper)
635 {
636 // If the solver currently needs VectorWrappers, but not due to a
637 // "sub-operator" (preconditioner or inner solver), then it's
638 // because the current system_oper needs them. Reset the property
639 // to false in case the new op is a SparseMatrix.
640 if (needs_wrapped_vecs == true && sub_op_needs_wrapped_vecs == false)
641 {
642 needs_wrapped_vecs = false;
643 }
644 // Reset the pointer
645 system_oper.reset();
646 // Reset the solver generated for the previous operator
647 solver.reset();
648 }
649
650 // Check for SparseMatrix:
651 SparseMatrix *op_mat = const_cast<SparseMatrix*>(
652 dynamic_cast<const SparseMatrix*>(&op));
653 if (op_mat != NULL)
654 {
655 // Needs to be a square matrix
656 MFEM_VERIFY(op_mat->Height() == op_mat->Width(),
657 "System matrix is not square");
658
659 bool on_device = false;
660 if (executor != executor->get_master())
661 {
662 on_device = true;
663 }
664
665 using mtx = gko::matrix::Csr<real_t, int>;
666 const int nnz = op_mat->GetMemoryData().Capacity();
667 system_oper = mtx::create(
668 executor, gko::dim<2>(op_mat->Height(), op_mat->Width()),
669 gko_array<real_t>::view(executor,
670 nnz,
671 op_mat->ReadWriteData(on_device)),
672 gko_array<int>::view(executor,
673 nnz,
674 op_mat->ReadWriteJ(on_device)),
675 gko_array<int>::view(executor, op_mat->Height() + 1,
676 op_mat->ReadWriteI(on_device)));
677
678 }
679 else
680 {
681 needs_wrapped_vecs = true;
682 system_oper = std::shared_ptr<OperatorWrapper>(
683 new OperatorWrapper(executor, op.Height(), &op));
684 }
685
686 // Set MFEM Solver size values
687 height = op.Height();
688 width = op.Width();
689
690 // Generate the solver from the solver factory, using the system matrix or operator.
691 solver = solver_gen->generate(system_oper);
692}
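// Taken together, SetOperator() and Mult() follow the usual MFEM solver
// pattern. A minimal sketch (assumes an assembled SparseMatrix A, Vectors b
// and x, and the SetRelTol/SetMaxIter/SetPrintLevel setters declared in
// ginkgo.hpp):
//
//    mfem::Ginkgo::GinkgoExecutor exec(mfem::Ginkgo::GinkgoExecutor::REFERENCE);
//    mfem::Ginkgo::CGSolver cg(exec);
//    cg.SetRelTol(1e-12);
//    cg.SetMaxIter(500);
//    cg.SetPrintLevel(1);
//    cg.SetOperator(A);   // builds the Ginkgo CSR view (or an OperatorWrapper)
//    cg.Mult(b, x);       // solves A x = b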
693
694/* ---------------------- CGSolver ------------------------ */
695CGSolver::CGSolver(GinkgoExecutor &exec)
696 : EnableGinkgoSolver(exec, true)
697{
698 using cg = gko::solver::Cg<real_t>;
699 this->solver_gen =
700 cg::build().with_criteria(this->combined_factory).on(this->executor);
701}
702
703CGSolver::CGSolver(GinkgoExecutor &exec,
704 const GinkgoPreconditioner &preconditioner)
705 : EnableGinkgoSolver(exec, true)
706{
707 using cg = gko::solver::Cg<real_t>;
708 // Check for a previously-generated preconditioner (for a specific matrix)
709 if (preconditioner.HasGeneratedPreconditioner())
710 {
711 this->solver_gen = cg::build()
712 .with_criteria(this->combined_factory)
713 .with_generated_preconditioner(
714 preconditioner.GetGeneratedPreconditioner())
715 .on(this->executor);
716 if (dynamic_cast<const OperatorWrapper*>(preconditioner.
717 GetGeneratedPreconditioner().get()))
718 {
719 this->sub_op_needs_wrapped_vecs = true;
720 this->needs_wrapped_vecs = true;
721 }
722 }
723 else // Pass a preconditioner factory (will use same matrix as the solver)
724 {
725 this->solver_gen = cg::build()
726 .with_criteria(this->combined_factory)
727 .with_preconditioner(preconditioner.GetFactory())
728 .on(this->executor);
729 }
730}
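// A preconditioned variant of the sketch above (reusing exec, A, b, x; the
// "auto" storage option is taken from JacobiPreconditioner below):
//
//    mfem::Ginkgo::JacobiPreconditioner jacobi(exec, "auto");
//    mfem::Ginkgo::CGSolver pcg(exec, jacobi);
//    pcg.SetOperator(A);   // the factory-built preconditioner reuses A
//    pcg.Mult(b, x);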
731
732
733/* ---------------------- BICGSTABSolver ------------------------ */
734BICGSTABSolver::BICGSTABSolver(GinkgoExecutor &exec)
735 : EnableGinkgoSolver(exec, true)
736{
737 using bicgstab = gko::solver::Bicgstab<real_t>;
738 this->solver_gen = bicgstab::build()
739 .with_criteria(this->combined_factory)
740 .on(this->executor);
741}
742
743BICGSTABSolver::BICGSTABSolver(GinkgoExecutor &exec,
744 const GinkgoPreconditioner &preconditioner)
745 : EnableGinkgoSolver(exec, true)
746{
747 using bicgstab = gko::solver::Bicgstab<real_t>;
748 if (preconditioner.HasGeneratedPreconditioner())
749 {
750 this->solver_gen = bicgstab::build()
751 .with_criteria(this->combined_factory)
752 .with_generated_preconditioner(
753 preconditioner.GetGeneratedPreconditioner())
754 .on(this->executor);
755 if (dynamic_cast<const OperatorWrapper*>(preconditioner.
756 GetGeneratedPreconditioner().get()))
757 {
758 this->sub_op_needs_wrapped_vecs = true;
759 this->needs_wrapped_vecs = true;
760 }
761 }
762 else
763 {
764 this->solver_gen = bicgstab::build()
765 .with_criteria(this->combined_factory)
766 .with_preconditioner(preconditioner.GetFactory())
767 .on(this->executor);
768 }
769}
770
771
772/* ---------------------- CGSSolver ------------------------ */
773CGSSolver::CGSSolver(GinkgoExecutor &exec)
774 : EnableGinkgoSolver(exec, true)
775{
776 using cgs = gko::solver::Cgs<real_t>;
777 this->solver_gen =
778 cgs::build().with_criteria(this->combined_factory).on(this->executor);
779}
780
781CGSSolver::CGSSolver(GinkgoExecutor &exec,
782 const GinkgoPreconditioner &preconditioner)
783 : EnableGinkgoSolver(exec, true)
784{
785 using cgs = gko::solver::Cgs<real_t>;
786 if (preconditioner.HasGeneratedPreconditioner())
787 {
788 this->solver_gen = cgs::build()
789 .with_criteria(this->combined_factory)
790 .with_generated_preconditioner(
791 preconditioner.GetGeneratedPreconditioner())
792 .on(this->executor);
793 if (dynamic_cast<const OperatorWrapper*>(preconditioner.
794 GetGeneratedPreconditioner().get()))
795 {
796 this->sub_op_needs_wrapped_vecs = true;
797 this->needs_wrapped_vecs = true;
798 }
799 }
800 else
801 {
802 this->solver_gen = cgs::build()
803 .with_criteria(this->combined_factory)
804 .with_preconditioner(preconditioner.GetFactory())
805 .on(this->executor);
806 }
807}
808
809
810/* ---------------------- FCGSolver ------------------------ */
811FCGSolver::FCGSolver(GinkgoExecutor &exec)
812 : EnableGinkgoSolver(exec, true)
813{
814 using fcg = gko::solver::Fcg<real_t>;
815 this->solver_gen =
816 fcg::build().with_criteria(this->combined_factory).on(this->executor);
817}
818
819FCGSolver::FCGSolver(GinkgoExecutor &exec,
820 const GinkgoPreconditioner &preconditioner)
821 : EnableGinkgoSolver(exec, true)
822{
823 using fcg = gko::solver::Fcg<real_t>;
824 if (preconditioner.HasGeneratedPreconditioner())
825 {
826 this->solver_gen = fcg::build()
827 .with_criteria(this->combined_factory)
828 .with_generated_preconditioner(
829 preconditioner.GetGeneratedPreconditioner())
830 .on(this->executor);
831 if (dynamic_cast<const OperatorWrapper*>(preconditioner.
832 GetGeneratedPreconditioner().get()))
833 {
834 this->sub_op_needs_wrapped_vecs = true;
835 this->needs_wrapped_vecs = true;
836 }
837 }
838 else
839 {
840 this->solver_gen = fcg::build()
841 .with_criteria(this->combined_factory)
842 .with_preconditioner(preconditioner.GetFactory())
843 .on(this->executor);
844 }
845}
846
847
848/* ---------------------- GMRESSolver ------------------------ */
849GMRESSolver::GMRESSolver(GinkgoExecutor &exec, int dim)
850 : EnableGinkgoSolver(exec, false),
851 m{dim}
852{
853 using gmres = gko::solver::Gmres<real_t>;
854 if (this->m == 0) // Don't set a dimension, but let Ginkgo use its default
855 {
856 this->solver_gen = gmres::build()
857 .with_criteria(this->combined_factory)
858 .on(this->executor);
859 }
860 else
861 {
862 this->solver_gen = gmres::build()
863 .with_krylov_dim(static_cast<unsigned long>(m))
864 .with_criteria(this->combined_factory)
865 .on(this->executor);
866 }
867}
868
869GMRESSolver::GMRESSolver(GinkgoExecutor &exec,
870 const GinkgoPreconditioner &preconditioner, int dim)
871 : EnableGinkgoSolver(exec, false),
872 m{dim}
873{
874 using gmres = gko::solver::Gmres<real_t>;
875 // Check for a previously-generated preconditioner (for a specific matrix)
876 if (this->m == 0) // Don't set a dimension, but let Ginkgo use its default
877 {
878 if (preconditioner.HasGeneratedPreconditioner())
879 {
880 this->solver_gen = gmres::build()
881 .with_criteria(this->combined_factory)
882 .with_generated_preconditioner(
883 preconditioner.GetGeneratedPreconditioner())
884 .on(this->executor);
885 if (dynamic_cast<const OperatorWrapper*>(preconditioner.
886 GetGeneratedPreconditioner().get()))
887 {
888 this->sub_op_needs_wrapped_vecs = true;
889 this->needs_wrapped_vecs = true;
890 }
891 }
892 else
893 {
894 this->solver_gen = gmres::build()
895 .with_criteria(this->combined_factory)
896 .with_preconditioner(preconditioner.GetFactory())
897 .on(this->executor);
898 }
899 }
900 else
901 {
902 if (preconditioner.HasGeneratedPreconditioner())
903 {
904 this->solver_gen = gmres::build()
905 .with_krylov_dim(static_cast<unsigned long>(m))
906 .with_criteria(this->combined_factory)
907 .with_generated_preconditioner(
908 preconditioner.GetGeneratedPreconditioner())
909 .on(this->executor);
910 if (dynamic_cast<const OperatorWrapper*>(preconditioner.
911 GetGeneratedPreconditioner().get()))
912 {
913 this->sub_op_needs_wrapped_vecs = true;
914 this->needs_wrapped_vecs = true;
915 }
916 }
917 else
918 {
919 this->solver_gen = gmres::build()
920 .with_krylov_dim(static_cast<unsigned long>(m))
921 .with_criteria(this->combined_factory)
922 .with_preconditioner(preconditioner.GetFactory())
923 .on(this->executor);
924 }
925 }
926}
927
928void GMRESSolver::SetKDim(int dim)
929{
930 m = dim;
931 using gmres = gko::solver::Gmres<real_t>;
932 // Create new solver factory with other parameters the same, but new value for krylov_dim
933 auto current_params = gko::as<gmres::Factory>(solver_gen)->get_parameters();
934 this->solver_gen = current_params.with_krylov_dim(static_cast<unsigned long>(m))
935 .on(this->executor);
936 if (solver)
937 {
938 gko::as<gmres>(solver)->set_krylov_dim(static_cast<unsigned long>(m));
939 }
940}
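// Sketch of controlling the restart (Krylov) dimension: pass it at
// construction or change it later with SetKDim(); dim == 0 keeps Ginkgo's
// default (reusing exec, A, b, x from the sketches above):
//
//    mfem::Ginkgo::GMRESSolver gmres(exec, 30);  // restart length 30
//    gmres.SetOperator(A);
//    gmres.SetKDim(50);                          // enlarge the Krylov basis
//    gmres.Mult(b, x);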
941
942/* ---------------------- CBGMRESSolver ------------------------ */
943CBGMRESSolver::CBGMRESSolver(GinkgoExecutor &exec, int dim,
944 storage_precision prec)
945 : EnableGinkgoSolver(exec, false),
946 m{dim}
947{
948 using gmres = gko::solver::CbGmres<real_t>;
949 if (this->m == 0) // Don't set a dimension, but let Ginkgo use its default
950 {
951 this->solver_gen = gmres::build()
952 .with_criteria(this->combined_factory)
953 .with_storage_precision(prec)
954 .on(this->executor);
955 }
956 else
957 {
958 this->solver_gen = gmres::build()
959 .with_krylov_dim(static_cast<unsigned long>(m))
960 .with_criteria(this->combined_factory)
961 .with_storage_precision(prec)
962 .on(this->executor);
963 }
964}
965
966CBGMRESSolver::CBGMRESSolver(GinkgoExecutor &exec,
967 const GinkgoPreconditioner &preconditioner,
968 int dim, storage_precision prec)
969 : EnableGinkgoSolver(exec, false),
970 m{dim}
971{
972 using gmres = gko::solver::CbGmres<real_t>;
973 // Check for a previously-generated preconditioner (for a specific matrix)
974 if (this->m == 0) // Don't set a dimension, but let Ginkgo use its default
975 {
976 if (preconditioner.HasGeneratedPreconditioner())
977 {
978 this->solver_gen = gmres::build()
979 .with_criteria(this->combined_factory)
980 .with_storage_precision(prec)
981 .with_generated_preconditioner(
982 preconditioner.GetGeneratedPreconditioner())
983 .on(this->executor);
984 if (dynamic_cast<const OperatorWrapper*>(preconditioner.
985 GetGeneratedPreconditioner().get()))
986 {
987 this->sub_op_needs_wrapped_vecs = true;
988 this->needs_wrapped_vecs = true;
989 }
990 }
991 else
992 {
993 this->solver_gen = gmres::build()
994 .with_criteria(this->combined_factory)
995 .with_storage_precision(prec)
996 .with_preconditioner(preconditioner.GetFactory())
997 .on(this->executor);
998 }
999 }
1000 else
1001 {
1002 if (preconditioner.HasGeneratedPreconditioner())
1003 {
1004 this->solver_gen = gmres::build()
1005 .with_krylov_dim(static_cast<unsigned long>(m))
1006 .with_criteria(this->combined_factory)
1007 .with_storage_precision(prec)
1008 .with_generated_preconditioner(
1009 preconditioner.GetGeneratedPreconditioner())
1010 .on(this->executor);
1011 if (dynamic_cast<const OperatorWrapper*>(preconditioner.
1012 GetGeneratedPreconditioner().get()))
1013 {
1014 this->sub_op_needs_wrapped_vecs = true;
1015 this->needs_wrapped_vecs = true;
1016 }
1017 }
1018 else
1019 {
1020 this->solver_gen = gmres::build()
1021 .with_krylov_dim(static_cast<unsigned long>(m))
1022 .with_criteria(this->combined_factory)
1023 .with_storage_precision(prec)
1024 .with_preconditioner(preconditioner.GetFactory())
1025 .on(this->executor);
1026 }
1027 }
1028}
1029
1030void CBGMRESSolver::SetKDim(int dim)
1031{
1032 m = dim;
1033 using gmres = gko::solver::CbGmres<real_t>;
1034 // Create new solver factory with other parameters the same, but new value for krylov_dim
1035 auto current_params = gko::as<gmres::Factory>(solver_gen)->get_parameters();
1036 this->solver_gen = current_params.with_krylov_dim(static_cast<unsigned long>(m))
1037 .on(this->executor);
1038 if (solver)
1039 {
1040 gko::as<gmres>(solver)->set_krylov_dim(static_cast<unsigned long>(m));
1041 }
1042}
1043
1044/* ---------------------- IRSolver ------------------------ */
1045IRSolver::IRSolver(GinkgoExecutor &exec)
1046 : EnableGinkgoSolver(exec, false)
1047{
1048 using ir = gko::solver::Ir<real_t>;
1049 this->solver_gen =
1050 ir::build().with_criteria(this->combined_factory).on(this->executor);
1051}
1052
1053IRSolver::IRSolver(GinkgoExecutor &exec,
1054 const GinkgoIterativeSolver &inner_solver)
1055 : EnableGinkgoSolver(exec, false)
1056{
1057 using ir = gko::solver::Ir<real_t>;
1058 this->solver_gen = ir::build()
1059 .with_criteria(this->combined_factory)
1060 .with_solver(inner_solver.GetFactory())
1061 .on(this->executor);
1062 if (inner_solver.UsesVectorWrappers())
1063 {
1064 this->sub_op_needs_wrapped_vecs = true;
1065 this->needs_wrapped_vecs = true;
1066 }
1067}
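// Iterative refinement uses another Ginkgo solver as its inner correction
// step. A sketch with a loosely converged inner CG (reusing exec, A, b, x):
//
//    mfem::Ginkgo::CGSolver inner_cg(exec);
//    inner_cg.SetRelTol(1e-2);
//    inner_cg.SetMaxIter(20);
//    mfem::Ginkgo::IRSolver ir(exec, inner_cg);
//    ir.SetRelTol(1e-12);
//    ir.SetMaxIter(50);
//    ir.SetOperator(A);
//    ir.Mult(b, x);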
1068
1069/* --------------------------------------------------------------- */
1070/* ---------------------- Preconditioners ------------------------ */
1071GinkgoPreconditioner::GinkgoPreconditioner(GinkgoExecutor &exec)
1072 : Solver()
1073{
1074 has_generated_precond = false;
1075 executor = exec.GetExecutor();
1076}
1077
1078
1079void
1080GinkgoPreconditioner::Mult(const Vector &x, Vector &y) const
1081{
1082
1083 MFEM_VERIFY(generated_precond, "Preconditioner not initialized");
1084 MFEM_VERIFY(executor, "executor is not initialized");
1085
1086 using vec = gko::matrix::Dense<real_t>;
1087 if (!iterative_mode)
1088 {
1089 y = 0.0;
1090 }
1091
1092 // Create x and y vectors in Ginkgo's format. Wrap MFEM's data directly,
1093 // on CPU or GPU.
1094 bool on_device = false;
1095 if (executor != executor->get_master())
1096 {
1097 on_device = true;
1098 }
1099 auto gko_x = vec::create(executor, gko::dim<2>(x.Size(), 1),
1100 gko_array<real_t>::view(executor,
1101 x.Size(), const_cast<real_t *>(
1102 x.Read(on_device))), 1);
1103 auto gko_y = vec::create(executor, gko::dim<2>(y.Size(), 1),
1104 gko_array<real_t>::view(executor,
1105 y.Size(),
1106 y.ReadWrite(on_device)), 1);
1107 generated_precond.get()->apply(gko_x, gko_y);
1108}
1109
1110void GinkgoPreconditioner::SetOperator(const Operator &op)
1111{
1112
1113 if (has_generated_precond)
1114 {
1115 generated_precond.reset();
1116 has_generated_precond = false;
1117 }
1118
1119 // Only accept SparseMatrix for this type.
1120 SparseMatrix *op_mat = const_cast<SparseMatrix*>(
1121 dynamic_cast<const SparseMatrix*>(&op));
1122 MFEM_VERIFY(op_mat != NULL,
1123 "GinkgoPreconditioner::SetOperator : not a SparseMatrix!");
1124
1125 bool on_device = false;
1126 if (executor != executor->get_master())
1127 {
1128 on_device = true;
1129 }
1130
1131 using mtx = gko::matrix::Csr<real_t, int>;
1132 const int nnz = op_mat->GetMemoryData().Capacity();
1133 auto gko_matrix = mtx::create(
1134 executor, gko::dim<2>(op_mat->Height(), op_mat->Width()),
1135 gko_array<real_t>::view(executor,
1136 nnz,
1137 op_mat->ReadWriteData(on_device)),
1138 gko_array<int>::view(executor,
1139 nnz,
1140 op_mat->ReadWriteJ(on_device)),
1141 gko_array<int>::view(executor, op_mat->Height() + 1,
1142 op_mat->ReadWriteI(on_device)));
1143
1144 generated_precond = precond_gen->generate(gko::give(gko_matrix));
1145 has_generated_precond = true;
1146
1147 // Set MFEM Solver size values
1148 height = op.Height();
1149 width = op.Width();
1150}
1151
1152
1153/* ---------------------- JacobiPreconditioner ------------------------ */
1154JacobiPreconditioner::JacobiPreconditioner(
1155 GinkgoExecutor &exec,
1156 const std::string &storage_opt,
1157 const real_t accuracy,
1158 const int max_block_size
1159)
1160 : GinkgoPreconditioner(exec)
1161{
1162
1163 if (storage_opt == "auto")
1164 {
1165 precond_gen = gko::preconditioner::Jacobi<real_t, int>::build()
1166 .with_storage_optimization(
1167 gko::precision_reduction::autodetect())
1168 .with_accuracy(accuracy)
1169 .with_max_block_size(static_cast<unsigned int>(max_block_size))
1170 .on(executor);
1171 }
1172 else
1173 {
1174 precond_gen = gko::preconditioner::Jacobi<real_t, int>::build()
1175 .with_storage_optimization(
1176 gko::precision_reduction(0, 0))
1177 .with_accuracy(accuracy)
1178 .with_max_block_size(static_cast<unsigned int>(max_block_size))
1179 .on(executor);
1180 }
1181
1182}
1183
1184/* ---------------------- Ilu/IluIsaiPreconditioner ------------------------ */
1185IluPreconditioner::IluPreconditioner(
1186 GinkgoExecutor &exec,
1187 const std::string &factorization_type,
1188 const int sweeps,
1189 const bool skip_sort
1190)
1191 : GinkgoPreconditioner(exec)
1192{
1193 if (factorization_type == "exact")
1194 {
1195 using ilu_fact_type = gko::factorization::Ilu<real_t, int>;
1196 std::shared_ptr<ilu_fact_type::Factory> fact_factory =
1197 ilu_fact_type::build()
1198 .with_skip_sorting(skip_sort)
1199 .on(executor);
1200 precond_gen = gko::preconditioner::Ilu<>::build()
1201 .with_factorization(fact_factory)
1202 .on(executor);
1203 }
1204 else
1205 {
1206 using ilu_fact_type = gko::factorization::ParIlu<real_t, int>;
1207 std::shared_ptr<ilu_fact_type::Factory> fact_factory =
1208 ilu_fact_type::build()
1209 .with_iterations(static_cast<unsigned long>(sweeps))
1210 .with_skip_sorting(skip_sort)
1211 .on(executor);
1212 precond_gen = gko::preconditioner::Ilu<>::build()
1213 .with_factorization(fact_factory)
1214 .on(executor);
1215 }
1216
1217}
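// In the constructor above, factorization_type == "exact" selects Ginkgo's
// standard ILU(0) factorization, while any other string selects the iterative
// ParILU factorization with `sweeps` fixed-point iterations. Sketch (reusing
// exec, A, b, x; the string "parilu" is illustrative, any value other than
// "exact" behaves the same way):
//
//    mfem::Ginkgo::IluPreconditioner ilu(exec, "parilu", 3);  // 3 ParILU sweeps
//    mfem::Ginkgo::GMRESSolver pgmres(exec, ilu, 30);
//    pgmres.SetOperator(A);
//    pgmres.Mult(b, x);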
1218
1219IluIsaiPreconditioner::IluIsaiPreconditioner(
1220 GinkgoExecutor &exec,
1221 const std::string &factorization_type,
1222 const int sweeps,
1223 const int sparsity_power,
1224 const bool skip_sort
1225)
1226 : GinkgoPreconditioner(exec)
1227{
1228 using l_solver_type = gko::preconditioner::LowerIsai<>;
1229 using u_solver_type = gko::preconditioner::UpperIsai<>;
1230
1231 std::shared_ptr<l_solver_type::Factory> l_solver_factory =
1232 l_solver_type::build()
1233 .with_sparsity_power(sparsity_power)
1234 .on(executor);
1235 std::shared_ptr<u_solver_type::Factory> u_solver_factory =
1236 u_solver_type::build()
1237 .with_sparsity_power(sparsity_power)
1238 .on(executor);
1239
1240
1241
1242 if (factorization_type == "exact")
1243 {
1244 using ilu_fact_type = gko::factorization::Ilu<real_t, int>;
1245 std::shared_ptr<ilu_fact_type::Factory> fact_factory =
1246 ilu_fact_type::build()
1247 .with_skip_sorting(skip_sort)
1248 .on(executor);
1249 precond_gen = gko::preconditioner::Ilu<l_solver_type,
1250 u_solver_type>::build()
1251 .with_factorization(fact_factory)
1252 .with_l_solver(l_solver_factory)
1253 .with_u_solver(u_solver_factory)
1254 .on(executor);
1255
1256 }
1257 else
1258 {
1259 using ilu_fact_type = gko::factorization::ParIlu<real_t, int>;
1260 std::shared_ptr<ilu_fact_type::Factory> fact_factory =
1261 ilu_fact_type::build()
1262 .with_iterations(static_cast<unsigned long>(sweeps))
1263 .with_skip_sorting(skip_sort)
1264 .on(executor);
1265 precond_gen = gko::preconditioner::Ilu<l_solver_type,
1266 u_solver_type>::build()
1267 .with_factorization(fact_factory)
1268 .with_l_solver(l_solver_factory)
1269 .with_u_solver(u_solver_factory)
1270 .on(executor);
1271 }
1272}
1273
1274
1275/* ---------------------- Ic/IcIsaiPreconditioner ------------------------ */
1276IcPreconditioner::IcPreconditioner(
1277 GinkgoExecutor &exec,
1278 const std::string &factorization_type,
1279 const int sweeps,
1280 const bool skip_sort
1281)
1282 : GinkgoPreconditioner(exec)
1283{
1284
1285 if (factorization_type == "exact")
1286 {
1287 using ic_fact_type = gko::factorization::Ic<real_t, int>;
1288 std::shared_ptr<ic_fact_type::Factory> fact_factory =
1289 ic_fact_type::build()
1290 .with_both_factors(false)
1291 .with_skip_sorting(skip_sort)
1292 .on(executor);
1293 precond_gen = gko::preconditioner::Ic<>::build()
1294 .with_factorization(fact_factory)
1295 .on(executor);
1296 }
1297 else
1298 {
1299 using ic_fact_type = gko::factorization::ParIc<real_t, int>;
1300 std::shared_ptr<ic_fact_type::Factory> fact_factory =
1301 ic_fact_type::build()
1302 .with_both_factors(false)
1303 .with_iterations(static_cast<unsigned long>(sweeps))
1304 .with_skip_sorting(skip_sort)
1305 .on(executor);
1306 precond_gen = gko::preconditioner::Ic<>::build()
1307 .with_factorization(fact_factory)
1308 .on(executor);
1309 }
1310}
1311
1312IcIsaiPreconditioner::IcIsaiPreconditioner(
1313 GinkgoExecutor &exec,
1314 const std::string &factorization_type,
1315 const int sweeps,
1316 const int sparsity_power,
1317 const bool skip_sort
1318)
1319 : GinkgoPreconditioner(exec)
1320{
1321
1322 using l_solver_type = gko::preconditioner::LowerIsai<>;
1323 std::shared_ptr<l_solver_type::Factory> l_solver_factory =
1324 l_solver_type::build()
1325 .with_sparsity_power(sparsity_power)
1326 .on(executor);
1327 if (factorization_type == "exact")
1328 {
1329 using ic_fact_type = gko::factorization::Ic<real_t, int>;
1330 std::shared_ptr<ic_fact_type::Factory> fact_factory =
1331 ic_fact_type::build()
1332 .with_both_factors(false)
1333 .with_skip_sorting(skip_sort)
1334 .on(executor);
1335 precond_gen = gko::preconditioner::Ic<l_solver_type>::build()
1336 .with_factorization(fact_factory)
1337 .with_l_solver(l_solver_factory)
1338 .on(executor);
1339 }
1340 else
1341 {
1342 using ic_fact_type = gko::factorization::ParIc<real_t, int>;
1343 std::shared_ptr<ic_fact_type::Factory> fact_factory =
1344 ic_fact_type::build()
1345 .with_both_factors(false)
1346 .with_iterations(static_cast<unsigned long>(sweeps))
1347 .with_skip_sorting(skip_sort)
1348 .on(executor);
1349 precond_gen = gko::preconditioner::Ic<l_solver_type>::build()
1350 .with_factorization(fact_factory)
1351 .with_l_solver(l_solver_factory)
1352 .on(executor);
1353 }
1354}
1355
1356/* ---------------------- MFEMPreconditioner ------------------------ */
1357MFEMPreconditioner::MFEMPreconditioner(
1358 GinkgoExecutor &exec,
1359 const Solver &mfem_precond
1360)
1361 : GinkgoPreconditioner(exec)
1362{
1363 generated_precond = std::shared_ptr<OperatorWrapper>(
1364 new OperatorWrapper(executor,
1365 mfem_precond.Height(), &mfem_precond));
1366 has_generated_precond = true;
1367}
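// Any MFEM Solver can be used as a Ginkgo preconditioner through this
// OperatorWrapper-based class. Sketch with an MFEM Gauss-Seidel smoother
// (reusing exec, A, b, x; GSSmoother is an ordinary mfem::Solver):
//
//    mfem::GSSmoother gs(A);
//    mfem::Ginkgo::MFEMPreconditioner mfem_precond(exec, gs);
//    mfem::Ginkgo::CGSolver wrapped_pcg(exec, mfem_precond);
//    wrapped_pcg.SetOperator(A);
//    wrapped_pcg.Mult(b, x);
//
// Because the wrapped preconditioner is an OperatorWrapper, the solver will
// switch to VectorWrapper-based vectors in Mult() (needs_wrapped_vecs).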
1368
1369} // namespace Ginkgo
1370
1371} // namespace mfem
1372
1373#endif // MFEM_USE_GINKGO