MFEM  v4.1.0
Finite element discretization library
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Pages
cuda.hpp
Go to the documentation of this file.
1 // Copyright (c) 2010-2020, Lawrence Livermore National Security, LLC. Produced
2 // at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3 // LICENSE and NOTICE for details. LLNL-CODE-806117.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability visit https://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the BSD-3 license. We welcome feedback and contributions, see file
10 // CONTRIBUTING.md for details.
11 
12 #ifndef MFEM_CUDA_HPP
13 #define MFEM_CUDA_HPP
14 
15 #include "../config/config.hpp"
16 #include "error.hpp"
17 
18 #ifdef MFEM_USE_CUDA
19 #include <cuda_runtime.h>
20 #include <cuda.h>
21 #endif
22 
23 // CUDA block size used by MFEM.
24 #define MFEM_CUDA_BLOCKS 256
25 
#ifdef MFEM_USE_CUDA
#define MFEM_DEVICE __device__
#define MFEM_HOST_DEVICE __host__ __device__
// Define a CUDA error check macro, MFEM_GPU_CHECK(x), where x returns/is of
// type 'cudaError_t'. This macro evaluates 'x' and raises an error if the
// result is not cudaSuccess.
//
// The macro-local status variable uses a reserved-style name instead of
// 'err' so that (a) it does not shadow an 'err' in the caller's scope --
// in particular the global mfem::err output stream -- and (b) an argument
// expression 'x' that itself mentions 'err' is not captured by the macro's
// own local while that local is still being initialized.
#define MFEM_GPU_CHECK(x) \
   do \
   { \
      cudaError_t mfem_err_internal_ = (x); \
      if (mfem_err_internal_ != cudaSuccess) \
      { \
         mfem_cuda_error(mfem_err_internal_, #x, _MFEM_FUNC_NAME, __FILE__, \
                         __LINE__); \
      } \
   } \
   while (0)
// Synchronize the whole device and check for errors; use at coarse
// boundaries or in debug paths (it serializes host and device).
#define MFEM_DEVICE_SYNC MFEM_GPU_CHECK(cudaDeviceSynchronize())
// Synchronize the legacy default stream (stream 0) and check for errors.
#define MFEM_STREAM_SYNC MFEM_GPU_CHECK(cudaStreamSynchronize(0))
#endif // MFEM_USE_CUDA
45 
// Define the MFEM inner threading macros
// (device-side definitions, active only while compiling CUDA device code,
// i.e. when __CUDA_ARCH__ is defined).
#if defined(MFEM_USE_CUDA) && defined(__CUDA_ARCH__)
// Block-shared scratch storage qualifier.
#define MFEM_SHARED __shared__
// Thread-block barrier; must be reached by all threads of the block.
#define MFEM_SYNC_THREAD __syncthreads()
// Thread index along dimension k (x, y or z) within the block.
#define MFEM_THREAD_ID(k) threadIdx.k
// Block extent along dimension k (x, y or z).
#define MFEM_THREAD_SIZE(k) blockDim.k
// Block-strided loop over [0,N): each thread starts at threadIdx.k and
// advances by blockDim.k, so the block as a whole covers all N iterations
// even when N is not a multiple of the block size.
#define MFEM_FOREACH_THREAD(i,k,N) for(int i=threadIdx.k; i<N; i+=blockDim.k)
#endif
54 
// Host fallbacks: when neither CUDA nor HIP is enabled, the qualifier and
// synchronization macros expand to nothing so the same source compiles as
// plain host C++.
#if !(defined(MFEM_USE_CUDA) || defined(MFEM_USE_HIP))
#define MFEM_DEVICE
#define MFEM_HOST_DEVICE
#define MFEM_DEVICE_SYNC
#define MFEM_STREAM_SYNC
#endif

// Host-side fallbacks for the inner threading macros: when not compiling
// device code (neither __CUDA_ARCH__ nor __ROCM_ARCH__ is defined), a
// "block" degenerates to a single thread — thread id 0, size 1 — and the
// foreach-thread loop becomes an ordinary sequential for-loop over [0,N).
#if !((defined(MFEM_USE_CUDA) && defined(__CUDA_ARCH__)) || \
      (defined(MFEM_USE_HIP) && defined(__ROCM_ARCH__)))
#define MFEM_SHARED
#define MFEM_SYNC_THREAD
#define MFEM_THREAD_ID(k) 0
#define MFEM_THREAD_SIZE(k) 1
#define MFEM_FOREACH_THREAD(i,k,N) for(int i=0; i<N; i++)
#endif
70 
namespace mfem
{

#ifdef MFEM_USE_CUDA
// Function used by the macro MFEM_GPU_CHECK: reports the failing expression
// 'expr' together with its source location (func/file/line). Defined in
// cuda.cpp.
void mfem_cuda_error(cudaError_t err, const char *expr, const char *func,
                     const char *file, int line);
#endif

/// Allocates device memory
void* CuMemAlloc(void **d_ptr, size_t bytes);

/// Allocates managed device memory
void* CuMallocManaged(void **d_ptr, size_t bytes);

/// Frees device memory
void* CuMemFree(void *d_ptr);

/// Copies memory from Host to Device
void* CuMemcpyHtoD(void *d_dst, const void *h_src, size_t bytes);

/// Copies memory from Host to Device asynchronously (may return before the
/// transfer completes; synchronize before relying on the destination)
void* CuMemcpyHtoDAsync(void *d_dst, const void *h_src, size_t bytes);

/// Copies memory from Device to Device
void* CuMemcpyDtoD(void *d_dst, const void *d_src, size_t bytes);

/// Copies memory from Device to Device asynchronously
void* CuMemcpyDtoDAsync(void *d_dst, const void *d_src, size_t bytes);

/// Copies memory from Device to Host
void* CuMemcpyDtoH(void *h_dst, const void *d_src, size_t bytes);

/// Copies memory from Device to Host asynchronously (may return before the
/// transfer completes; synchronize before reading the host buffer)
void* CuMemcpyDtoHAsync(void *h_dst, const void *d_src, size_t bytes);

/// Check the error code returned by cudaGetLastError(), aborting on error.
void CuCheckLastError();

/// Get the number of CUDA devices
int CuGetDeviceCount();

} // namespace mfem
114 
115 #endif // MFEM_CUDA_HPP
void * CuMemcpyHtoD(void *dst, const void *src, size_t bytes)
Copies memory from Host to Device.
Definition: cuda.cpp:79
void * CuMemFree(void *dptr)
Frees device memory.
Definition: cuda.cpp:64
void CuCheckLastError()
Check the error code returned by cudaGetLastError(), aborting on error.
Definition: cuda.cpp:148
int CuGetDeviceCount()
Get the number of CUDA devices.
Definition: cuda.cpp:155
void * CuMallocManaged(void **dptr, size_t bytes)
Allocates managed device memory.
Definition: cuda.cpp:49
void mfem_cuda_error(cudaError_t err, const char *expr, const char *func, const char *file, int line)
Definition: cuda.cpp:23
void * CuMemcpyDtoD(void *dst, const void *src, size_t bytes)
Copies memory from Device to Device.
Definition: cuda.cpp:102
void * CuMemcpyDtoDAsync(void *dst, const void *src, size_t bytes)
Copies memory from Device to Device.
Definition: cuda.cpp:117
OutStream err(std::cerr)
Global stream used by the library for standard error output. Initially it uses the same std::streambuf as std::cerr.
Definition: globals.hpp:71
void * CuMemcpyDtoHAsync(void *dst, const void *src, size_t bytes)
Copies memory from Device to Host.
Definition: cuda.cpp:140
void * CuMemcpyHtoDAsync(void *dst, const void *src, size_t bytes)
Copies memory from Host to Device.
Definition: cuda.cpp:94
void * CuMemAlloc(void **dptr, size_t bytes)
Allocates device memory.
Definition: cuda.cpp:34
void * CuMemcpyDtoH(void *dst, const void *src, size_t bytes)
Copies memory from Device to Host.
Definition: cuda.cpp:125