24 const char *file,
int line)
26 mfem::err <<
"\n\nCUDA error: (" << expr <<
") failed with error:\n --> "
27 << cudaGetErrorString(
err)
28 <<
"\n ... in function: " << func
29 <<
"\n ... in file: " << file <<
':' << line <<
'\n';
37#ifdef MFEM_TRACK_CUDA_MEM
38 mfem::out <<
"CuMemAlloc(): allocating " << bytes <<
" bytes ... "
41 MFEM_GPU_CHECK(cudaMalloc(dptr, bytes));
42#ifdef MFEM_TRACK_CUDA_MEM
43 mfem::out <<
"done: " << *dptr << std::endl;
52#ifdef MFEM_TRACK_CUDA_MEM
53 mfem::out <<
"CuMallocManaged(): allocating " << bytes <<
" bytes ... "
56 MFEM_GPU_CHECK(cudaMallocManaged(dptr, bytes));
57#ifdef MFEM_TRACK_CUDA_MEM
58 mfem::out <<
"done: " << *dptr << std::endl;
67#ifdef MFEM_TRACK_CUDA_MEM
68 mfem::out <<
"CuMemAllocHostPinned(): allocating " << bytes <<
" bytes ... "
71 MFEM_GPU_CHECK(cudaMallocHost(ptr, bytes));
72#ifdef MFEM_TRACK_CUDA_MEM
73 mfem::out <<
"done: " << *ptr << std::endl;
82#ifdef MFEM_TRACK_CUDA_MEM
83 mfem::out <<
"CuMemFree(): deallocating memory @ " << dptr <<
" ... "
86 MFEM_GPU_CHECK(cudaFree(dptr));
87#ifdef MFEM_TRACK_CUDA_MEM
97#ifdef MFEM_TRACK_CUDA_MEM
98 mfem::out <<
"CuMemFreeHostPinned(): deallocating memory @ " << ptr <<
" ... "
101 MFEM_GPU_CHECK(cudaFreeHost(ptr));
102#ifdef MFEM_TRACK_CUDA_MEM
112#ifdef MFEM_TRACK_CUDA_MEM
113 mfem::out <<
"CuMemcpyHtoD(): copying " << bytes <<
" bytes from "
114 << src <<
" to " << dst <<
" ... " << std::flush;
116 MFEM_GPU_CHECK(cudaMemcpy(dst, src, bytes, cudaMemcpyHostToDevice));
117#ifdef MFEM_TRACK_CUDA_MEM
127 MFEM_GPU_CHECK(cudaMemcpyAsync(dst, src, bytes, cudaMemcpyHostToDevice));
135#ifdef MFEM_TRACK_CUDA_MEM
136 mfem::out <<
"CuMemcpyDtoD(): copying " << bytes <<
" bytes from "
137 << src <<
" to " << dst <<
" ... " << std::flush;
139 MFEM_GPU_CHECK(cudaMemcpy(dst, src, bytes, cudaMemcpyDeviceToDevice));
140#ifdef MFEM_TRACK_CUDA_MEM
150 MFEM_GPU_CHECK(cudaMemcpyAsync(dst, src, bytes, cudaMemcpyDeviceToDevice));
158#ifdef MFEM_TRACK_CUDA_MEM
159 mfem::out <<
"CuMemcpyDtoH(): copying " << bytes <<
" bytes from "
160 << src <<
" to " << dst <<
" ... " << std::flush;
162 MFEM_GPU_CHECK(cudaMemcpy(dst, src, bytes, cudaMemcpyDeviceToHost));
163#ifdef MFEM_TRACK_CUDA_MEM
173 MFEM_GPU_CHECK(cudaMemcpyAsync(dst, src, bytes, cudaMemcpyDeviceToHost));
181 MFEM_GPU_CHECK(cudaGetLastError());
189 MFEM_GPU_CHECK(cudaGetDeviceCount(&num_gpus));
void * CuMemAlloc(void **dptr, size_t bytes)
Allocates device memory and returns destination ptr.
void * CuMemFree(void *dptr)
Frees device memory and returns destination ptr.
void * CuMemcpyDtoHAsync(void *dst, const void *src, size_t bytes)
Copies memory from Device to Host.
void mfem_error(const char *msg)
void * CuMallocManaged(void **dptr, size_t bytes)
Allocates managed device memory.
void * CuMemcpyDtoH(void *dst, const void *src, size_t bytes)
Copies memory from Device to Host.
OutStream out(std::cout)
Global stream used by the library for standard output. Initially it uses the same std::streambuf as std::cout.
void * CuMemAllocHostPinned(void **ptr, size_t bytes)
Allocates page-locked (pinned) host memory.
void * CuMemFreeHostPinned(void *ptr)
Frees page-locked (pinned) host memory and returns destination ptr.
void mfem_cuda_error(cudaError_t err, const char *expr, const char *func, const char *file, int line)
void * CuMemcpyHtoD(void *dst, const void *src, size_t bytes)
Copies memory from Host to Device and returns destination ptr.
int CuGetDeviceCount()
Get the number of CUDA devices.
OutStream err(std::cerr)
Global stream used by the library for standard error output. Initially it uses the same std::streambuf as std::cerr.
void * CuMemcpyHtoDAsync(void *dst, const void *src, size_t bytes)
Copies memory from Host to Device and returns destination ptr.
void CuCheckLastError()
Check the error code returned by cudaGetLastError(), aborting on error.
void * CuMemcpyDtoDAsync(void *dst, const void *src, size_t bytes)
Copies memory from Device to Device.
void * CuMemcpyDtoD(void *dst, const void *src, size_t bytes)
Copies memory from Device to Device.