MFEM v4.7.0
Finite element discretization library
Loading...
Searching...
No Matches
device.cpp
Go to the documentation of this file.
// Copyright (c) 2010-2024, Lawrence Livermore National Security, LLC. Produced
// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
// LICENSE and NOTICE for details. LLNL-CODE-806117.
//
// This file is part of the MFEM library. For more information and source code
// availability visit https://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the BSD-3 license. We welcome feedback and contributions, see file
// CONTRIBUTING.md for details.
11
#include "forall.hpp"
#include "occa.hpp"
#ifdef MFEM_USE_CEED
#include "../fem/ceed/interface/util.hpp"
#endif
#ifdef MFEM_USE_MPI
#include "../linalg/hypre.hpp"
#endif

#include <cstdlib>  // getenv, free
#include <cstring>  // strcmp, strdup, memcpy
#include <map>
#include <string>
#include <unordered_map>
24
25namespace mfem
26{
27
28// Place the following variables in the mfem::internal namespace, so that they
29// will not be included in the doxygen documentation.
30namespace internal
31{
32
33#ifdef MFEM_USE_OCCA
34// Default occa::device used by MFEM.
35occa::device occaDevice;
36#endif
37
38#ifdef MFEM_USE_CEED
39Ceed ceed = NULL;
40
41ceed::BasisMap ceed_basis_map;
42ceed::RestrMap ceed_restr_map;
43#endif
44
45// Backends listed by priority, high to low:
46static const Backend::Id backend_list[Backend::NUM_BACKENDS] =
47{
52};
53
54// Backend names listed by priority, high to low:
55static const char *backend_name[Backend::NUM_BACKENDS] =
56{
57 "ceed-cuda", "occa-cuda", "raja-cuda", "cuda",
58 "ceed-hip", "raja-hip", "hip", "debug",
59 "occa-omp", "raja-omp", "omp",
60 "ceed-cpu", "occa-cpu", "raja-cpu", "cpu"
61};
62
63} // namespace mfem::internal
64
65
66// Initialize the unique global Device variable.
67Device Device::device_singleton;
68bool Device::device_env = false;
69bool Device::mem_host_env = false;
70bool Device::mem_device_env = false;
71bool Device::mem_types_set = false;
72
74{
75 if (getenv("MFEM_MEMORY") && !mem_host_env && !mem_device_env)
76 {
77 std::string mem_backend(getenv("MFEM_MEMORY"));
78 if (mem_backend == "host")
79 {
80 mem_host_env = true;
81 host_mem_type = MemoryType::HOST;
82 device_mem_type = MemoryType::HOST;
83 }
84 else if (mem_backend == "host32")
85 {
86 mem_host_env = true;
87 host_mem_type = MemoryType::HOST_32;
88 device_mem_type = MemoryType::HOST_32;
89 }
90 else if (mem_backend == "host64")
91 {
92 mem_host_env = true;
93 host_mem_type = MemoryType::HOST_64;
94 device_mem_type = MemoryType::HOST_64;
95 }
96 else if (mem_backend == "umpire")
97 {
98 mem_host_env = true;
99 host_mem_type = MemoryType::HOST_UMPIRE;
100 // Note: device_mem_type will be set to MemoryType::DEVICE_UMPIRE only
101 // when an actual device is configured -- this is done later in
102 // Device::UpdateMemoryTypeAndClass().
103 device_mem_type = MemoryType::HOST_UMPIRE;
104 }
105 else if (mem_backend == "debug")
106 {
107 mem_host_env = true;
108 host_mem_type = MemoryType::HOST_DEBUG;
109 // Note: device_mem_type will be set to MemoryType::DEVICE_DEBUG only
110 // when an actual device is configured -- this is done later in
111 // Device::UpdateMemoryTypeAndClass().
112 device_mem_type = MemoryType::HOST_DEBUG;
113 }
114 else if (false
115#ifdef MFEM_USE_CUDA
116 || mem_backend == "cuda"
117#endif
118#ifdef MFEM_USE_HIP
119 || mem_backend == "hip"
120#endif
121 )
122 {
123 mem_host_env = true;
124 host_mem_type = MemoryType::HOST;
125 mem_device_env = true;
126 device_mem_type = MemoryType::DEVICE;
127 }
128 else if (mem_backend == "uvm")
129 {
130 mem_host_env = true;
131 mem_device_env = true;
132 host_mem_type = MemoryType::MANAGED;
133 device_mem_type = MemoryType::MANAGED;
134 }
135 else
136 {
137 MFEM_ABORT("Unknown memory backend!");
138 }
139 mm.Configure(host_mem_type, device_mem_type);
140 }
141
142 if (getenv("MFEM_DEVICE"))
143 {
144 std::string device(getenv("MFEM_DEVICE"));
145 Configure(device);
146 device_env = true;
147 }
148}
149
150
152{
153 if ( device_env && !destroy_mm) { return; }
154 if (!device_env && destroy_mm && !mem_host_env)
155 {
156 free(device_option);
157#ifdef MFEM_USE_CEED
158 // Destroy FES -> CeedBasis, CeedElemRestriction hash table contents
159 for (auto entry : internal::ceed_basis_map)
160 {
161 CeedBasisDestroy(&entry.second);
162 }
163 internal::ceed_basis_map.clear();
164 for (auto entry : internal::ceed_restr_map)
165 {
166 CeedElemRestrictionDestroy(&entry.second);
167 }
168 internal::ceed_restr_map.clear();
169 // Destroy Ceed context
170 CeedDestroy(&internal::ceed);
171#endif
172 mm.Destroy();
173 }
174 Get().ngpu = -1;
175 Get().mode = SEQUENTIAL;
176 Get().backends = Backend::CPU;
177 Get().host_mem_type = MemoryType::HOST;
178 Get().host_mem_class = MemoryClass::HOST;
179 Get().device_mem_type = MemoryType::HOST;
180 Get().device_mem_class = MemoryClass::HOST;
181}
182
183void Device::Configure(const std::string &device, const int device_id)
184{
185 // If a device was configured via the environment, skip the configuration,
186 // and avoid the 'singleton_device' to destroy the mm.
187 if (device_env)
188 {
189 std::memcpy(this, &Get(), sizeof(Device));
190 Get().destroy_mm = false;
191 return;
192 }
193
194 std::map<std::string, Backend::Id> bmap;
195 for (int i = 0; i < Backend::NUM_BACKENDS; i++)
196 {
197 bmap[internal::backend_name[i]] = internal::backend_list[i];
198 }
199 std::string::size_type beg = 0, end, option;
200 while (1)
201 {
202 end = device.find(',', beg);
203 end = (end != std::string::npos) ? end : device.size();
204 const std::string bname = device.substr(beg, end - beg);
205 option = bname.find(':');
206 if (option==std::string::npos) // No option
207 {
208 const std::string backend = bname;
209 std::map<std::string, Backend::Id>::iterator it = bmap.find(backend);
210 MFEM_VERIFY(it != bmap.end(), "invalid backend name: '" << backend << '\'');
211 Get().MarkBackend(it->second);
212 }
213 else
214 {
215 const std::string backend = bname.substr(0, option);
216 const std::string boption = bname.substr(option+1);
217 Get().device_option = strdup(boption.c_str());
218 std::map<std::string, Backend::Id>::iterator it = bmap.find(backend);
219 MFEM_VERIFY(it != bmap.end(), "invalid backend name: '" << backend << '\'');
220 Get().MarkBackend(it->second);
221 }
222 if (end == device.size()) { break; }
223 beg = end + 1;
224 }
225
226 // OCCA_CUDA and CEED_CUDA need CUDA or RAJA_CUDA:
229 {
230 Get().MarkBackend(Backend::CUDA);
231 }
232 // CEED_HIP needs HIP:
234 {
235 Get().MarkBackend(Backend::HIP);
236 }
237 // OCCA_OMP will use OMP or RAJA_OMP unless MFEM_USE_OPENMP=NO:
238#ifdef MFEM_USE_OPENMP
240 {
241 Get().MarkBackend(Backend::OMP);
242 }
243#endif
244
245 // Perform setup.
246 Get().Setup(device_id);
247
248 // Enable the device
249 Enable();
250
251 // Copy all data members from the global 'singleton_device' into '*this'.
252 if (this != &Get()) { std::memcpy(this, &Get(), sizeof(Device)); }
253
254 // Only '*this' will call the MemoryManager::Destroy() method.
255 destroy_mm = true;
256
257#ifdef MFEM_USE_MPI
259#endif
260}
261
262// static method
264{
265 // If the device and/or the MemoryTypes are configured through the
266 // environment (variables 'MFEM_DEVICE', 'MFEM_MEMORY'), ignore calls to this
267 // method.
268 if (mem_host_env || mem_device_env || device_env) { return; }
269
270 MFEM_VERIFY(!IsConfigured(), "the default MemoryTypes can only be set before"
271 " Device construction and configuration");
272 MFEM_VERIFY(IsHostMemory(h_mt),
273 "invalid host MemoryType, h_mt = " << (int)h_mt);
274 MFEM_VERIFY(IsDeviceMemory(d_mt) || d_mt == h_mt,
275 "invalid device MemoryType, d_mt = " << (int)d_mt
276 << " (h_mt = " << (int)h_mt << ')');
277
278 Get().host_mem_type = h_mt;
279 Get().device_mem_type = d_mt;
280 mem_types_set = true;
281
282 // h_mt and d_mt will be set as dual to each other during configuration by
283 // the call mm.Configure(...) in UpdateMemoryTypeAndClass()
284}
285
286void Device::Print(std::ostream &os)
287{
288 os << "Device configuration: ";
289 bool add_comma = false;
290 for (int i = 0; i < Backend::NUM_BACKENDS; i++)
291 {
292 if (backends & internal::backend_list[i])
293 {
294 if (add_comma) { os << ','; }
295 add_comma = true;
296 os << internal::backend_name[i];
297 }
298 }
299 os << '\n';
300#ifdef MFEM_USE_CEED
302 {
303 const char *ceed_backend;
304 CeedGetResource(internal::ceed, &ceed_backend);
305 os << "libCEED backend: " << ceed_backend << '\n';
306 }
307#endif
308 os << "Memory configuration: "
309 << MemoryTypeName[static_cast<int>(host_mem_type)];
311 {
312 os << ',' << MemoryTypeName[static_cast<int>(device_mem_type)];
313 }
314 os << std::endl;
315}
316
317void Device::UpdateMemoryTypeAndClass()
318{
319 const bool debug = Device::Allows(Backend::DEBUG_DEVICE);
320
321 const bool device = Device::Allows(Backend::DEVICE_MASK);
322
323#ifdef MFEM_USE_UMPIRE
324 // If MFEM has been compiled with Umpire support, use it as the default
325 if (!mem_host_env && !mem_types_set)
326 {
327 host_mem_type = MemoryType::HOST_UMPIRE;
328 if (!mem_device_env)
329 {
330 device_mem_type = MemoryType::HOST_UMPIRE;
331 }
332 }
333#endif
334
335 // Enable the device memory type
336 if (device)
337 {
338 if (!mem_device_env)
339 {
340 if (mem_host_env)
341 {
342 switch (host_mem_type)
343 {
345 device_mem_type = MemoryType::DEVICE_UMPIRE;
346 break;
348 device_mem_type = MemoryType::DEVICE_DEBUG;
349 break;
350 default:
351 device_mem_type = MemoryType::DEVICE;
352 }
353 }
354 else if (!mem_types_set)
355 {
356#ifndef MFEM_USE_UMPIRE
357 device_mem_type = MemoryType::DEVICE;
358#else
359 device_mem_type = MemoryType::DEVICE_UMPIRE;
360#endif
361 }
362 }
363 device_mem_class = MemoryClass::DEVICE;
364 }
365
366 // Enable the UVM shortcut when requested
367 if (device && device_option && !strcmp(device_option, "uvm"))
368 {
369 host_mem_type = MemoryType::MANAGED;
370 device_mem_type = MemoryType::MANAGED;
371 }
372
373 // Enable the DEBUG mode when requested
374 if (debug)
375 {
376 host_mem_type = MemoryType::HOST_DEBUG;
377 device_mem_type = MemoryType::DEVICE_DEBUG;
378 }
379
380 MFEM_VERIFY(!device || IsDeviceMemory(device_mem_type),
381 "invalid device memory configuration!");
382
383 // Update the memory manager with the new settings
384 mm.Configure(host_mem_type, device_mem_type);
385}
386
387void Device::Enable()
388{
389 const bool accelerated = Get().backends & ~(Backend::CPU);
390 if (accelerated) { Get().mode = Device::ACCELERATED;}
391 Get().UpdateMemoryTypeAndClass();
392}
393
394#ifdef MFEM_USE_CUDA
395static void DeviceSetup(const int dev, int &ngpu)
396{
397 ngpu = CuGetDeviceCount();
398 MFEM_VERIFY(ngpu > 0, "No CUDA device found!");
399 MFEM_GPU_CHECK(cudaSetDevice(dev));
400}
401#endif
402
403static void CudaDeviceSetup(const int dev, int &ngpu)
404{
405#ifdef MFEM_USE_CUDA
406 DeviceSetup(dev, ngpu);
407#else
408 MFEM_CONTRACT_VAR(dev);
409 MFEM_CONTRACT_VAR(ngpu);
410#endif
411}
412
413static void HipDeviceSetup(const int dev, int &ngpu)
414{
415#ifdef MFEM_USE_HIP
416 MFEM_GPU_CHECK(hipGetDeviceCount(&ngpu));
417 MFEM_VERIFY(ngpu > 0, "No HIP device found!");
418 MFEM_GPU_CHECK(hipSetDevice(dev));
419#else
420 MFEM_CONTRACT_VAR(dev);
421 MFEM_CONTRACT_VAR(ngpu);
422#endif
423}
424
425static void RajaDeviceSetup(const int dev, int &ngpu)
426{
427#ifdef MFEM_USE_CUDA
428 if (ngpu <= 0) { DeviceSetup(dev, ngpu); }
429#elif defined(MFEM_USE_HIP)
430 HipDeviceSetup(dev, ngpu);
431#else
432 MFEM_CONTRACT_VAR(dev);
433 MFEM_CONTRACT_VAR(ngpu);
434#endif
435}
436
437static void OccaDeviceSetup(const int dev)
438{
439#ifdef MFEM_USE_OCCA
440 const int cpu = Device::Allows(Backend::OCCA_CPU);
441 const int omp = Device::Allows(Backend::OCCA_OMP);
442 const int cuda = Device::Allows(Backend::OCCA_CUDA);
443 if (cpu + omp + cuda > 1)
444 {
445 MFEM_ABORT("Only one OCCA backend can be configured at a time!");
446 }
447 if (cuda)
448 {
449#if OCCA_CUDA_ENABLED
450 std::string mode("mode: 'CUDA', device_id : ");
451 internal::occaDevice.setup(mode.append(1,'0'+dev));
452#else
453 MFEM_ABORT("the OCCA CUDA backend requires OCCA built with CUDA!");
454#endif
455 }
456 else if (omp)
457 {
458#if OCCA_OPENMP_ENABLED
459 internal::occaDevice.setup("mode: 'OpenMP'");
460#else
461 MFEM_ABORT("the OCCA OpenMP backend requires OCCA built with OpenMP!");
462#endif
463 }
464 else
465 {
466 internal::occaDevice.setup("mode: 'Serial'");
467 }
468
469 std::string mfemDir;
470 if (occa::io::exists(MFEM_INSTALL_DIR "/include/mfem/"))
471 {
472 mfemDir = MFEM_INSTALL_DIR "/include/mfem/";
473 }
474 else if (occa::io::exists(MFEM_SOURCE_DIR))
475 {
476 mfemDir = MFEM_SOURCE_DIR;
477 }
478 else
479 {
480 MFEM_ABORT("Cannot find OCCA kernels in MFEM_INSTALL_DIR or MFEM_SOURCE_DIR");
481 }
482
483 occa::io::addLibraryPath("mfem", mfemDir);
484 occa::loadKernels("mfem");
485#else
486 MFEM_CONTRACT_VAR(dev);
487 MFEM_ABORT("the OCCA backends require MFEM built with MFEM_USE_OCCA=YES");
488#endif
489}
490
491static void CeedDeviceSetup(const char* ceed_spec)
492{
493#ifdef MFEM_USE_CEED
494 CeedInit(ceed_spec, &internal::ceed);
495 const char *ceed_backend;
496 CeedGetResource(internal::ceed, &ceed_backend);
497 if (strcmp(ceed_spec, ceed_backend) && strcmp(ceed_spec, "/cpu/self") &&
498 strcmp(ceed_spec, "/gpu/hip"))
499 {
500 mfem::out << std::endl << "WARNING!!!\n"
501 "libCEED is not using the requested backend!!!\n"
502 "WARNING!!!\n" << std::endl;
503 }
504#ifdef MFEM_DEBUG
505 CeedSetErrorHandler(internal::ceed, CeedErrorStore);
506#endif
507#else
508 MFEM_CONTRACT_VAR(ceed_spec);
509#endif
510}
511
512void Device::Setup(const int device_id)
513{
514 MFEM_VERIFY(ngpu == -1, "the mfem::Device is already configured!");
515
516 ngpu = 0;
517 dev = device_id;
518#ifndef MFEM_USE_CUDA
519 MFEM_VERIFY(!Allows(Backend::CUDA_MASK),
520 "the CUDA backends require MFEM built with MFEM_USE_CUDA=YES");
521#endif
522#ifndef MFEM_USE_HIP
523 MFEM_VERIFY(!Allows(Backend::HIP_MASK),
524 "the HIP backends require MFEM built with MFEM_USE_HIP=YES");
525#endif
526#ifndef MFEM_USE_RAJA
527 MFEM_VERIFY(!Allows(Backend::RAJA_MASK),
528 "the RAJA backends require MFEM built with MFEM_USE_RAJA=YES");
529#endif
530#ifndef MFEM_USE_OPENMP
532 "the OpenMP and RAJA OpenMP backends require MFEM built with"
533 " MFEM_USE_OPENMP=YES");
534#endif
535#ifndef MFEM_USE_CEED
536 MFEM_VERIFY(!Allows(Backend::CEED_MASK),
537 "the CEED backends require MFEM built with MFEM_USE_CEED=YES");
538#else
539 int ceed_cpu = Allows(Backend::CEED_CPU);
540 int ceed_cuda = Allows(Backend::CEED_CUDA);
541 int ceed_hip = Allows(Backend::CEED_HIP);
542 MFEM_VERIFY(ceed_cpu + ceed_cuda + ceed_hip <= 1,
543 "Only one CEED backend can be enabled at a time!");
544#endif
545 if (Allows(Backend::CUDA)) { CudaDeviceSetup(dev, ngpu); }
546 if (Allows(Backend::HIP)) { HipDeviceSetup(dev, ngpu); }
548 { RajaDeviceSetup(dev, ngpu); }
549 // The check for MFEM_USE_OCCA is in the function OccaDeviceSetup().
550 if (Allows(Backend::OCCA_MASK)) { OccaDeviceSetup(dev); }
552 {
553 if (!device_option)
554 {
555 CeedDeviceSetup("/cpu/self");
556 }
557 else
558 {
559 CeedDeviceSetup(device_option);
560 }
561 }
563 {
564 if (!device_option)
565 {
566 // NOTE: libCEED's /gpu/cuda/gen backend is non-deterministic!
567 CeedDeviceSetup("/gpu/cuda/gen");
568 }
569 else
570 {
571 CeedDeviceSetup(device_option);
572 }
573 }
575 {
576 if (!device_option)
577 {
578 CeedDeviceSetup("/gpu/hip");
579 }
580 else
581 {
582 CeedDeviceSetup(device_option);
583 }
584 }
585 if (Allows(Backend::DEBUG_DEVICE)) { ngpu = 1; }
586}
587
588} // mfem
The MFEM Device class abstracts hardware devices such as GPUs, as well as programming models such as ...
Definition: device.hpp:123
~Device()
Destructor.
Definition: device.cpp:151
static bool IsConfigured()
Return true if Configure() has been called previously.
Definition: device.hpp:241
void Configure(const std::string &device, const int dev=0)
Configure the Device backends.
Definition: device.cpp:183
void Print(std::ostream &out=mfem::out)
Print the configuration of the MFEM virtual device object.
Definition: device.cpp:286
static bool Allows(unsigned long b_mask)
Return true if any of the backends in the backend mask, b_mask, are allowed.
Definition: device.hpp:259
static void SetMemoryTypes(MemoryType h_mt, MemoryType d_mt)
Set the default host and device MemoryTypes, h_mt and d_mt.
Definition: device.cpp:263
Device()
Default constructor. Unless Configure() is called later, the default Backend::CPU will be used.
Definition: device.cpp:73
static void InitDevice()
Configure HYPRE's compute and memory policy.
Definition: hypre.cpp:43
void Configure(const MemoryType h_mt, const MemoryType d_mt)
Configure the Memory manager with given default host and device types. This method will be called whe...
void Destroy()
Free all the device memories.
std::unordered_map< const BasisKey, CeedBasis, BasisHash > BasisMap
Definition: util.hpp:144
std::unordered_map< const RestrKey, CeedElemRestriction, RestrHash > RestrMap
Definition: util.hpp:166
bool IsDeviceMemory(MemoryType mt)
Return true if the given memory type is in MemoryClass::DEVICE.
Definition: mem_manager.hpp:95
OutStream out(std::cout)
Global stream used by the library for standard output. Initially it uses the same std::streambuf as s...
Definition: globals.hpp:66
MemoryManager mm
The (single) global memory manager object.
int CuGetDeviceCount()
Get the number of CUDA devices.
Definition: cuda.cpp:185
bool IsHostMemory(MemoryType mt)
Return true if the given memory type is in MemoryClass::HOST.
Definition: mem_manager.hpp:92
const char * MemoryTypeName[MemoryTypeSize]
Memory type names, used during Device:: configuration.
MemoryType
Memory types supported by MFEM.
Definition: mem_manager.hpp:39
@ HOST_32
Host memory; aligned at 32 bytes.
@ HOST_64
Host memory; aligned at 64 bytes.
@ HOST
Host memory; using new[] and delete[].
@ HOST_DEBUG
Host memory; allocated from a "host-debug" pool.
@ DEVICE
Device memory; using CUDA or HIP *Malloc and *Free.
Id
In the documentation below, we use square brackets to indicate the type of the backend: host or devic...
Definition: device.hpp:32
@ RAJA_OMP
[host] RAJA OpenMP backend. Enabled when MFEM_USE_RAJA = YES and MFEM_USE_OPENMP = YES.
Definition: device.hpp:46
@ RAJA_CUDA
[device] RAJA CUDA backend. Enabled when MFEM_USE_RAJA = YES and MFEM_USE_CUDA = YES.
Definition: device.hpp:49
@ DEBUG_DEVICE
[device] Debug backend: host memory is READ/WRITE protected while a device is in use....
Definition: device.hpp:76
@ RAJA_CPU
[host] RAJA CPU backend: sequential execution on each MPI rank. Enabled when MFEM_USE_RAJA = YES.
Definition: device.hpp:43
@ OMP
[host] OpenMP backend. Enabled when MFEM_USE_OPENMP = YES.
Definition: device.hpp:36
@ HIP
[device] HIP backend. Enabled when MFEM_USE_HIP = YES.
Definition: device.hpp:40
@ OCCA_OMP
[host] OCCA OpenMP backend. Enabled when MFEM_USE_OCCA = YES.
Definition: device.hpp:57
@ RAJA_HIP
[device] RAJA HIP backend. Enabled when MFEM_USE_RAJA = YES and MFEM_USE_HIP = YES.
Definition: device.hpp:52
@ OCCA_CUDA
[device] OCCA CUDA backend. Enabled when MFEM_USE_OCCA = YES and MFEM_USE_CUDA = YES.
Definition: device.hpp:60
@ CEED_CPU
[host] CEED CPU backend. GPU backends can still be used, but with expensive memory transfers....
Definition: device.hpp:63
@ OCCA_CPU
[host] OCCA CPU backend: sequential execution on each MPI rank. Enabled when MFEM_USE_OCCA = YES.
Definition: device.hpp:55
@ CEED_CUDA
[device] CEED CUDA backend working together with the CUDA backend. Enabled when MFEM_USE_CEED = YES a...
Definition: device.hpp:67
@ CPU
[host] Default CPU backend: sequential execution on each MPI rank.
Definition: device.hpp:34
@ CUDA
[device] CUDA backend. Enabled when MFEM_USE_CUDA = YES.
Definition: device.hpp:38
@ CEED_HIP
[device] CEED HIP backend working together with the HIP backend. Enabled when MFEM_USE_CEED = YES and...
Definition: device.hpp:70
@ RAJA_MASK
Bitwise-OR of all RAJA backends.
Definition: device.hpp:100
@ DEVICE_MASK
Bitwise-OR of all device backends.
Definition: device.hpp:97
@ CEED_MASK
Bitwise-OR of all CEED backends.
Definition: device.hpp:95
@ OCCA_MASK
Bitwise-OR of all OCCA backends.
Definition: device.hpp:102
@ HIP_MASK
Bitwise-OR of all HIP backends.
Definition: device.hpp:91
@ NUM_BACKENDS
Number of backends: from (1 << 0) to (1 << (NUM_BACKENDS-1)).
Definition: device.hpp:84
@ CUDA_MASK
Bitwise-OR of all CUDA backends.
Definition: device.hpp:89