23 #ifndef HIP_INCLUDE_HIP_NVIDIA_DETAIL_HIP_RUNTIME_API_H
24 #define HIP_INCLUDE_HIP_NVIDIA_DETAIL_HIP_RUNTIME_API_H
26 #include <cuda_runtime_api.h>
28 #include <cuda_profiler_api.h>
29 #include <cuda_fp16.h>
36 #define __dparm(x) = x
42 #if defined(__DOXYGEN_ONLY__) || defined(HIP_ENABLE_DEPRECATED)
43 #define __HIP_DEPRECATED
44 #elif defined(_MSC_VER)
45 #define __HIP_DEPRECATED __declspec(deprecated)
46 #elif defined(__GNUC__)
47 #define __HIP_DEPRECATED __attribute__((deprecated))
49 #define __HIP_DEPRECATED
// Memory copy directions; values mirror cudaMemcpyKind one-to-one so the
// conversion helper below is an identity map.
typedef enum hipMemcpyKind {
    hipMemcpyHostToHost = 0,
    hipMemcpyHostToDevice = 1,
    hipMemcpyDeviceToHost = 2,
    hipMemcpyDeviceToDevice = 3,
    hipMemcpyDefault = 4
} hipMemcpyKind;
// cudaDataType aliases.
#define hipDataType cudaDataType
#define HIP_R_16F CUDA_R_16F
#define HIP_R_32F CUDA_R_32F
#define HIP_R_64F CUDA_R_64F
#define HIP_C_16F CUDA_C_16F
#define HIP_C_32F CUDA_C_32F
#define HIP_C_64F CUDA_C_64F

// Library property queries.
#define hipLibraryPropertyType libraryPropertyType
#define HIP_LIBRARY_MAJOR_VERSION MAJOR_VERSION
#define HIP_LIBRARY_MINOR_VERSION MINOR_VERSION
#define HIP_LIBRARY_PATCH_LEVEL PATCH_LEVEL

// Driver-API array descriptors.
#define HIP_ARRAY_DESCRIPTOR CUDA_ARRAY_DESCRIPTOR
#define HIP_ARRAY3D_DESCRIPTOR CUDA_ARRAY3D_DESCRIPTOR

// Driver-API array element formats.
#define HIP_AD_FORMAT_UNSIGNED_INT8 CU_AD_FORMAT_UNSIGNED_INT8
#define HIP_AD_FORMAT_UNSIGNED_INT16 CU_AD_FORMAT_UNSIGNED_INT16
#define HIP_AD_FORMAT_UNSIGNED_INT32 CU_AD_FORMAT_UNSIGNED_INT32
#define HIP_AD_FORMAT_SIGNED_INT8 CU_AD_FORMAT_SIGNED_INT8
#define HIP_AD_FORMAT_SIGNED_INT16 CU_AD_FORMAT_SIGNED_INT16
#define HIP_AD_FORMAT_SIGNED_INT32 CU_AD_FORMAT_SIGNED_INT32
#define HIP_AD_FORMAT_HALF CU_AD_FORMAT_HALF
#define HIP_AD_FORMAT_FLOAT CU_AD_FORMAT_FLOAT

#define hipArray_Format CUarray_format
113 inline static CUarray_format hipArray_FormatToCUarray_format(
114 hipArray_Format format) {
116 case HIP_AD_FORMAT_UNSIGNED_INT8:
117 return CU_AD_FORMAT_UNSIGNED_INT8;
118 case HIP_AD_FORMAT_UNSIGNED_INT16:
119 return CU_AD_FORMAT_UNSIGNED_INT16;
120 case HIP_AD_FORMAT_UNSIGNED_INT32:
121 return CU_AD_FORMAT_UNSIGNED_INT32;
122 case HIP_AD_FORMAT_SIGNED_INT8:
123 return CU_AD_FORMAT_SIGNED_INT8;
124 case HIP_AD_FORMAT_SIGNED_INT16:
125 return CU_AD_FORMAT_SIGNED_INT16;
126 case HIP_AD_FORMAT_SIGNED_INT32:
127 return CU_AD_FORMAT_SIGNED_INT32;
128 case HIP_AD_FORMAT_HALF:
129 return CU_AD_FORMAT_HALF;
130 case HIP_AD_FORMAT_FLOAT:
131 return CU_AD_FORMAT_FLOAT;
133 return CU_AD_FORMAT_UNSIGNED_INT8;
// Driver-API texture addressing modes.
#define HIP_TR_ADDRESS_MODE_WRAP CU_TR_ADDRESS_MODE_WRAP
#define HIP_TR_ADDRESS_MODE_CLAMP CU_TR_ADDRESS_MODE_CLAMP
#define HIP_TR_ADDRESS_MODE_MIRROR CU_TR_ADDRESS_MODE_MIRROR
#define HIP_TR_ADDRESS_MODE_BORDER CU_TR_ADDRESS_MODE_BORDER

#define hipAddress_mode CUaddress_mode
145 inline static CUaddress_mode hipAddress_modeToCUaddress_mode(
146 hipAddress_mode mode) {
148 case HIP_TR_ADDRESS_MODE_WRAP:
149 return CU_TR_ADDRESS_MODE_WRAP;
150 case HIP_TR_ADDRESS_MODE_CLAMP:
151 return CU_TR_ADDRESS_MODE_CLAMP;
152 case HIP_TR_ADDRESS_MODE_MIRROR:
153 return CU_TR_ADDRESS_MODE_MIRROR;
154 case HIP_TR_ADDRESS_MODE_BORDER:
155 return CU_TR_ADDRESS_MODE_BORDER;
157 return CU_TR_ADDRESS_MODE_WRAP;
// Driver-API texture filter modes.
#define HIP_TR_FILTER_MODE_POINT CU_TR_FILTER_MODE_POINT
#define HIP_TR_FILTER_MODE_LINEAR CU_TR_FILTER_MODE_LINEAR

#define hipFilter_mode CUfilter_mode
167 inline static CUfilter_mode hipFilter_mode_enumToCUfilter_mode(
168 hipFilter_mode mode) {
170 case HIP_TR_FILTER_MODE_POINT:
171 return CU_TR_FILTER_MODE_POINT;
172 case HIP_TR_FILTER_MODE_LINEAR:
173 return CU_TR_FILTER_MODE_LINEAR;
175 return CU_TR_FILTER_MODE_POINT;
// Driver-API resource types.
#define HIP_RESOURCE_TYPE_ARRAY CU_RESOURCE_TYPE_ARRAY
#define HIP_RESOURCE_TYPE_MIPMAPPED_ARRAY CU_RESOURCE_TYPE_MIPMAPPED_ARRAY
#define HIP_RESOURCE_TYPE_LINEAR CU_RESOURCE_TYPE_LINEAR
#define HIP_RESOURCE_TYPE_PITCH2D CU_RESOURCE_TYPE_PITCH2D

#define hipResourcetype CUresourcetype
188 inline static CUresourcetype hipResourcetype_enumToCUresourcetype(
189 hipResourcetype resType) {
191 case HIP_RESOURCE_TYPE_ARRAY:
192 return CU_RESOURCE_TYPE_ARRAY;
193 case HIP_RESOURCE_TYPE_MIPMAPPED_ARRAY:
194 return CU_RESOURCE_TYPE_MIPMAPPED_ARRAY;
195 case HIP_RESOURCE_TYPE_LINEAR:
196 return CU_RESOURCE_TYPE_LINEAR;
197 case HIP_RESOURCE_TYPE_PITCH2D:
198 return CU_RESOURCE_TYPE_PITCH2D;
200 return CU_RESOURCE_TYPE_ARRAY;
204 #define hipTexRef CUtexref
205 #define hiparray CUarray
208 typedef enum cudaTextureAddressMode hipTextureAddressMode;
209 #define hipAddressModeWrap cudaAddressModeWrap
210 #define hipAddressModeClamp cudaAddressModeClamp
211 #define hipAddressModeMirror cudaAddressModeMirror
212 #define hipAddressModeBorder cudaAddressModeBorder
215 typedef enum cudaTextureFilterMode hipTextureFilterMode;
216 #define hipFilterModePoint cudaFilterModePoint
217 #define hipFilterModeLinear cudaFilterModeLinear
220 typedef enum cudaTextureReadMode hipTextureReadMode;
221 #define hipReadModeElementType cudaReadModeElementType
222 #define hipReadModeNormalizedFloat cudaReadModeNormalizedFloat
225 typedef enum cudaChannelFormatKind hipChannelFormatKind;
226 #define hipChannelFormatKindSigned cudaChannelFormatKindSigned
227 #define hipChannelFormatKindUnsigned cudaChannelFormatKindUnsigned
228 #define hipChannelFormatKindFloat cudaChannelFormatKindFloat
229 #define hipChannelFormatKindNone cudaChannelFormatKindNone
231 #define hipSurfaceBoundaryMode cudaSurfaceBoundaryMode
232 #define hipBoundaryModeZero cudaBoundaryModeZero
233 #define hipBoundaryModeTrap cudaBoundaryModeTrap
234 #define hipBoundaryModeClamp cudaBoundaryModeClamp
237 #define hipFuncCachePreferNone cudaFuncCachePreferNone
238 #define hipFuncCachePreferShared cudaFuncCachePreferShared
239 #define hipFuncCachePreferL1 cudaFuncCachePreferL1
240 #define hipFuncCachePreferEqual cudaFuncCachePreferEqual
243 #define hipResourceType cudaResourceType
244 #define hipResourceTypeArray cudaResourceTypeArray
245 #define hipResourceTypeMipmappedArray cudaResourceTypeMipmappedArray
246 #define hipResourceTypeLinear cudaResourceTypeLinear
247 #define hipResourceTypePitch2D cudaResourceTypePitch2D
253 #define hipEventDefault cudaEventDefault
254 #define hipEventBlockingSync cudaEventBlockingSync
255 #define hipEventDisableTiming cudaEventDisableTiming
256 #define hipEventInterprocess cudaEventInterprocess
257 #define hipEventReleaseToDevice 0
258 #define hipEventReleaseToSystem 0
261 #define hipHostMallocDefault cudaHostAllocDefault
262 #define hipHostMallocPortable cudaHostAllocPortable
263 #define hipHostMallocMapped cudaHostAllocMapped
264 #define hipHostMallocWriteCombined cudaHostAllocWriteCombined
265 #define hipHostMallocCoherent 0x0
266 #define hipHostMallocNonCoherent 0x0
268 #define hipMemAttachGlobal cudaMemAttachGlobal
269 #define hipMemAttachHost cudaMemAttachHost
270 #define hipMemAttachSingle cudaMemAttachSingle
272 #define hipHostRegisterDefault cudaHostRegisterDefault
273 #define hipHostRegisterPortable cudaHostRegisterPortable
274 #define hipHostRegisterMapped cudaHostRegisterMapped
275 #define hipHostRegisterIoMemory cudaHostRegisterIoMemory
277 #define HIP_LAUNCH_PARAM_BUFFER_POINTER CU_LAUNCH_PARAM_BUFFER_POINTER
278 #define HIP_LAUNCH_PARAM_BUFFER_SIZE CU_LAUNCH_PARAM_BUFFER_SIZE
279 #define HIP_LAUNCH_PARAM_END CU_LAUNCH_PARAM_END
280 #define hipLimitMallocHeapSize cudaLimitMallocHeapSize
281 #define hipIpcMemLazyEnablePeerAccess cudaIpcMemLazyEnablePeerAccess
283 #define hipOccupancyDefault cudaOccupancyDefault
285 #define hipCooperativeLaunchMultiDeviceNoPreSync \
286 cudaCooperativeLaunchMultiDeviceNoPreSync
287 #define hipCooperativeLaunchMultiDeviceNoPostSync \
288 cudaCooperativeLaunchMultiDeviceNoPostSync
292 #define hipJitOptionMaxRegisters CU_JIT_MAX_REGISTERS
293 #define hipJitOptionThreadsPerBlock CU_JIT_THREADS_PER_BLOCK
294 #define hipJitOptionWallTime CU_JIT_WALL_TIME
295 #define hipJitOptionInfoLogBuffer CU_JIT_INFO_LOG_BUFFER
296 #define hipJitOptionInfoLogBufferSizeBytes CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES
297 #define hipJitOptionErrorLogBuffer CU_JIT_ERROR_LOG_BUFFER
298 #define hipJitOptionErrorLogBufferSizeBytes CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES
299 #define hipJitOptionOptimizationLevel CU_JIT_OPTIMIZATION_LEVEL
300 #define hipJitOptionTargetFromContext CU_JIT_TARGET_FROM_CUCONTEXT
301 #define hipJitOptionTarget CU_JIT_TARGET
302 #define hipJitOptionFallbackStrategy CU_JIT_FALLBACK_STRATEGY
303 #define hipJitOptionGenerateDebugInfo CU_JIT_GENERATE_DEBUG_INFO
304 #define hipJitOptionLogVerbose CU_JIT_LOG_VERBOSE
305 #define hipJitOptionGenerateLineInfo CU_JIT_GENERATE_LINE_INFO
306 #define hipJitOptionCacheMode CU_JIT_CACHE_MODE
307 #define hipJitOptionSm3xOpt CU_JIT_NEW_SM3X_OPT
308 #define hipJitOptionFastCompile CU_JIT_FAST_COMPILE
309 #define hipJitOptionNumOptions CU_JIT_NUM_OPTIONS
311 typedef cudaEvent_t hipEvent_t;
312 typedef cudaStream_t hipStream_t;
315 typedef enum cudaLimit hipLimit_t;
318 typedef CUcontext hipCtx_t;
320 typedef CUfunc_cache hipFuncCache;
321 typedef CUjit_option hipJitOption;
322 typedef CUdevice hipDevice_t;
323 typedef enum cudaDeviceP2PAttr hipDeviceP2PAttr;
324 #define hipDevP2PAttrPerformanceRank cudaDevP2PAttrPerformanceRank
325 #define hipDevP2PAttrAccessSupported cudaDevP2PAttrAccessSupported
326 #define hipDevP2PAttrNativeAtomicSupported cudaDevP2PAttrNativeAtomicSupported
327 #define hipDevP2PAttrHipArrayAccessSupported cudaDevP2PAttrCudaArrayAccessSupported
328 #define hipFuncAttributeMaxDynamicSharedMemorySize cudaFuncAttributeMaxDynamicSharedMemorySize
329 #define hipFuncAttributePreferredSharedMemoryCarveout cudaFuncAttributePreferredSharedMemoryCarveout
331 typedef CUmodule hipModule_t;
332 typedef CUfunction hipFunction_t;
333 typedef CUdeviceptr hipDeviceptr_t;
339 #define hipFunction_attribute CUfunction_attribute
340 #define hip_Memcpy2D CUDA_MEMCPY2D
341 #define HIP_MEMCPY3D CUDA_MEMCPY3D
342 #define hipMemcpy3DParms cudaMemcpy3DParms
343 #define hipArrayDefault cudaArrayDefault
344 #define hipArrayLayered cudaArrayLayered
345 #define hipArraySurfaceLoadStore cudaArraySurfaceLoadStore
346 #define hipArrayCubemap cudaArrayCubemap
347 #define hipArrayTextureGather cudaArrayTextureGather
349 typedef cudaTextureObject_t hipTextureObject_t;
351 #define hipTextureType1D cudaTextureType1D
352 #define hipTextureType1DLayered cudaTextureType1DLayered
353 #define hipTextureType2D cudaTextureType2D
354 #define hipTextureType2DLayered cudaTextureType2DLayered
355 #define hipTextureType3D cudaTextureType3D
357 #define hipDeviceScheduleAuto cudaDeviceScheduleAuto
358 #define hipDeviceScheduleSpin cudaDeviceScheduleSpin
359 #define hipDeviceScheduleYield cudaDeviceScheduleYield
360 #define hipDeviceScheduleBlockingSync cudaDeviceScheduleBlockingSync
361 #define hipDeviceScheduleMask cudaDeviceScheduleMask
362 #define hipDeviceMapHost cudaDeviceMapHost
363 #define hipDeviceLmemResizeToMax cudaDeviceLmemResizeToMax
365 #define hipCpuDeviceId cudaCpuDeviceId
366 #define hipInvalidDeviceId cudaInvalidDeviceId
369 #define make_hipExtent make_cudaExtent
370 #define make_hipPos make_cudaPos
371 #define make_hipPitchedPtr make_cudaPitchedPtr
373 #define hipStreamDefault cudaStreamDefault
374 #define hipStreamNonBlocking cudaStreamNonBlocking
381 #define hipSharedMemBankSizeDefault cudaSharedMemBankSizeDefault
382 #define hipSharedMemBankSizeFourByte cudaSharedMemBankSizeFourByte
383 #define hipSharedMemBankSizeEightByte cudaSharedMemBankSizeEightByte
386 #define HIP_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK
387 #define HIP_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES
388 #define HIP_FUNC_ATTRIBUTE_CONST_SIZE_BYTES CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES
389 #define HIP_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES
390 #define HIP_FUNC_ATTRIBUTE_NUM_REGS CU_FUNC_ATTRIBUTE_NUM_REGS
391 #define HIP_FUNC_ATTRIBUTE_PTX_VERSION CU_FUNC_ATTRIBUTE_PTX_VERSION
392 #define HIP_FUNC_ATTRIBUTE_BINARY_VERSION CU_FUNC_ATTRIBUTE_BINARY_VERSION
393 #define HIP_FUNC_ATTRIBUTE_CACHE_MODE_CA CU_FUNC_ATTRIBUTE_CACHE_MODE_CA
394 #define HIP_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES
395 #define HIP_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT
396 #define HIP_FUNC_ATTRIBUTE_MAX CU_FUNC_ATTRIBUTE_MAX
398 #if CUDA_VERSION >= 9000
399 #define __shfl(...) __shfl_sync(0xffffffff, __VA_ARGS__)
400 #define __shfl_up(...) __shfl_up_sync(0xffffffff, __VA_ARGS__)
401 #define __shfl_down(...) __shfl_down_sync(0xffffffff, __VA_ARGS__)
402 #define __shfl_xor(...) __shfl_xor_sync(0xffffffff, __VA_ARGS__)
403 #endif // CUDA_VERSION >= 9000
405 inline static hipError_t hipCUDAErrorTohipError(cudaError_t cuError) {
409 case cudaErrorProfilerDisabled:
410 return hipErrorProfilerDisabled;
411 case cudaErrorProfilerNotInitialized:
412 return hipErrorProfilerNotInitialized;
413 case cudaErrorProfilerAlreadyStarted:
414 return hipErrorProfilerAlreadyStarted;
415 case cudaErrorProfilerAlreadyStopped:
416 return hipErrorProfilerAlreadyStopped;
417 case cudaErrorInsufficientDriver:
418 return hipErrorInsufficientDriver;
419 case cudaErrorUnsupportedLimit:
420 return hipErrorUnsupportedLimit;
421 case cudaErrorPeerAccessUnsupported:
422 return hipErrorPeerAccessUnsupported;
423 case cudaErrorInvalidGraphicsContext:
424 return hipErrorInvalidGraphicsContext;
425 case cudaErrorSharedObjectSymbolNotFound:
426 return hipErrorSharedObjectSymbolNotFound;
427 case cudaErrorSharedObjectInitFailed:
428 return hipErrorSharedObjectInitFailed;
429 case cudaErrorOperatingSystem:
430 return hipErrorOperatingSystem;
431 case cudaErrorSetOnActiveProcess:
432 return hipErrorSetOnActiveProcess;
433 case cudaErrorIllegalAddress:
434 return hipErrorIllegalAddress;
435 case cudaErrorInvalidSymbol:
436 return hipErrorInvalidSymbol;
437 case cudaErrorMissingConfiguration:
438 return hipErrorMissingConfiguration;
439 case cudaErrorMemoryAllocation:
440 return hipErrorOutOfMemory;
441 case cudaErrorInitializationError:
442 return hipErrorNotInitialized;
443 case cudaErrorLaunchFailure:
445 case cudaErrorCooperativeLaunchTooLarge:
447 case cudaErrorPriorLaunchFailure:
448 return hipErrorPriorLaunchFailure;
449 case cudaErrorLaunchOutOfResources:
451 case cudaErrorInvalidDeviceFunction:
452 return hipErrorInvalidDeviceFunction;
453 case cudaErrorInvalidConfiguration:
454 return hipErrorInvalidConfiguration;
455 case cudaErrorInvalidDevice:
457 case cudaErrorInvalidValue:
459 case cudaErrorInvalidDevicePointer:
461 case cudaErrorInvalidMemcpyDirection:
463 case cudaErrorInvalidResourceHandle:
464 return hipErrorInvalidHandle;
465 case cudaErrorNotReady:
467 case cudaErrorNoDevice:
469 case cudaErrorPeerAccessAlreadyEnabled:
471 case cudaErrorPeerAccessNotEnabled:
473 case cudaErrorHostMemoryAlreadyRegistered:
475 case cudaErrorHostMemoryNotRegistered:
477 case cudaErrorMapBufferObjectFailed:
478 return hipErrorMapFailed;
479 case cudaErrorAssert:
481 case cudaErrorNotSupported:
483 case cudaErrorCudartUnloading:
484 return hipErrorDeinitialized;
485 case cudaErrorInvalidKernelImage:
486 return hipErrorInvalidImage;
487 case cudaErrorUnmapBufferObjectFailed:
488 return hipErrorUnmapFailed;
489 case cudaErrorNoKernelImageForDevice:
490 return hipErrorNoBinaryForGpu;
491 case cudaErrorECCUncorrectable:
492 return hipErrorECCNotCorrectable;
493 case cudaErrorDeviceAlreadyInUse:
494 return hipErrorContextAlreadyInUse;
495 case cudaErrorInvalidPtx:
497 case cudaErrorLaunchTimeout:
498 return hipErrorLaunchTimeOut;
499 #if CUDA_VERSION >= 10010
500 case cudaErrorInvalidSource:
501 return hipErrorInvalidSource;
502 case cudaErrorFileNotFound:
503 return hipErrorFileNotFound;
504 case cudaErrorSymbolNotFound:
505 return hipErrorNotFound;
506 case cudaErrorArrayIsMapped:
507 return hipErrorArrayIsMapped;
508 case cudaErrorNotMappedAsPointer:
509 return hipErrorNotMappedAsPointer;
510 case cudaErrorNotMappedAsArray:
511 return hipErrorNotMappedAsArray;
512 case cudaErrorNotMapped:
513 return hipErrorNotMapped;
514 case cudaErrorAlreadyAcquired:
515 return hipErrorAlreadyAcquired;
516 case cudaErrorAlreadyMapped:
517 return hipErrorAlreadyMapped;
519 #if CUDA_VERSION >= 10020
520 case cudaErrorDeviceUninitialized:
523 case cudaErrorUnknown:
525 return hipErrorUnknown;
529 inline static hipError_t hipCUResultTohipError(CUresult cuError) {
533 case CUDA_ERROR_OUT_OF_MEMORY:
534 return hipErrorOutOfMemory;
535 case CUDA_ERROR_INVALID_VALUE:
537 case CUDA_ERROR_INVALID_DEVICE:
539 case CUDA_ERROR_DEINITIALIZED:
540 return hipErrorDeinitialized;
541 case CUDA_ERROR_NO_DEVICE:
543 case CUDA_ERROR_INVALID_CONTEXT:
545 case CUDA_ERROR_NOT_INITIALIZED:
546 return hipErrorNotInitialized;
547 case CUDA_ERROR_INVALID_HANDLE:
548 return hipErrorInvalidHandle;
549 case CUDA_ERROR_MAP_FAILED:
550 return hipErrorMapFailed;
551 case CUDA_ERROR_PROFILER_DISABLED:
552 return hipErrorProfilerDisabled;
553 case CUDA_ERROR_PROFILER_NOT_INITIALIZED:
554 return hipErrorProfilerNotInitialized;
555 case CUDA_ERROR_PROFILER_ALREADY_STARTED:
556 return hipErrorProfilerAlreadyStarted;
557 case CUDA_ERROR_PROFILER_ALREADY_STOPPED:
558 return hipErrorProfilerAlreadyStopped;
559 case CUDA_ERROR_INVALID_IMAGE:
560 return hipErrorInvalidImage;
561 case CUDA_ERROR_CONTEXT_ALREADY_CURRENT:
562 return hipErrorContextAlreadyCurrent;
563 case CUDA_ERROR_UNMAP_FAILED:
564 return hipErrorUnmapFailed;
565 case CUDA_ERROR_ARRAY_IS_MAPPED:
566 return hipErrorArrayIsMapped;
567 case CUDA_ERROR_ALREADY_MAPPED:
568 return hipErrorAlreadyMapped;
569 case CUDA_ERROR_NO_BINARY_FOR_GPU:
570 return hipErrorNoBinaryForGpu;
571 case CUDA_ERROR_ALREADY_ACQUIRED:
572 return hipErrorAlreadyAcquired;
573 case CUDA_ERROR_NOT_MAPPED:
574 return hipErrorNotMapped;
575 case CUDA_ERROR_NOT_MAPPED_AS_ARRAY:
576 return hipErrorNotMappedAsArray;
577 case CUDA_ERROR_NOT_MAPPED_AS_POINTER:
578 return hipErrorNotMappedAsPointer;
579 case CUDA_ERROR_ECC_UNCORRECTABLE:
580 return hipErrorECCNotCorrectable;
581 case CUDA_ERROR_UNSUPPORTED_LIMIT:
582 return hipErrorUnsupportedLimit;
583 case CUDA_ERROR_CONTEXT_ALREADY_IN_USE:
584 return hipErrorContextAlreadyInUse;
585 case CUDA_ERROR_PEER_ACCESS_UNSUPPORTED:
586 return hipErrorPeerAccessUnsupported;
587 case CUDA_ERROR_INVALID_PTX:
589 case CUDA_ERROR_INVALID_GRAPHICS_CONTEXT:
590 return hipErrorInvalidGraphicsContext;
591 case CUDA_ERROR_INVALID_SOURCE:
592 return hipErrorInvalidSource;
593 case CUDA_ERROR_FILE_NOT_FOUND:
594 return hipErrorFileNotFound;
595 case CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND:
596 return hipErrorSharedObjectSymbolNotFound;
597 case CUDA_ERROR_SHARED_OBJECT_INIT_FAILED:
598 return hipErrorSharedObjectInitFailed;
599 case CUDA_ERROR_OPERATING_SYSTEM:
600 return hipErrorOperatingSystem;
601 case CUDA_ERROR_NOT_FOUND:
602 return hipErrorNotFound;
603 case CUDA_ERROR_NOT_READY:
605 case CUDA_ERROR_ILLEGAL_ADDRESS:
606 return hipErrorIllegalAddress;
607 case CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES:
609 case CUDA_ERROR_LAUNCH_TIMEOUT:
610 return hipErrorLaunchTimeOut;
611 case CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED:
613 case CUDA_ERROR_PEER_ACCESS_NOT_ENABLED:
615 case CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE:
616 return hipErrorSetOnActiveProcess;
617 case CUDA_ERROR_ASSERT:
619 case CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED:
621 case CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED:
623 case CUDA_ERROR_LAUNCH_FAILED:
625 case CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE:
627 case CUDA_ERROR_NOT_SUPPORTED:
629 case CUDA_ERROR_UNKNOWN:
631 return hipErrorUnknown;
635 inline static cudaError_t hipErrorToCudaError(hipError_t hError) {
639 case hipErrorOutOfMemory:
640 return cudaErrorMemoryAllocation;
641 case hipErrorProfilerDisabled:
642 return cudaErrorProfilerDisabled;
643 case hipErrorProfilerNotInitialized:
644 return cudaErrorProfilerNotInitialized;
645 case hipErrorProfilerAlreadyStarted:
646 return cudaErrorProfilerAlreadyStarted;
647 case hipErrorProfilerAlreadyStopped:
648 return cudaErrorProfilerAlreadyStopped;
649 case hipErrorInvalidConfiguration:
650 return cudaErrorInvalidConfiguration;
652 return cudaErrorLaunchOutOfResources;
654 return cudaErrorInvalidValue;
655 case hipErrorInvalidHandle:
656 return cudaErrorInvalidResourceHandle;
658 return cudaErrorInvalidDevice;
660 return cudaErrorInvalidMemcpyDirection;
662 return cudaErrorInvalidDevicePointer;
663 case hipErrorNotInitialized:
664 return cudaErrorInitializationError;
666 return cudaErrorNoDevice;
668 return cudaErrorNotReady;
670 return cudaErrorPeerAccessNotEnabled;
672 return cudaErrorPeerAccessAlreadyEnabled;
674 return cudaErrorHostMemoryAlreadyRegistered;
676 return cudaErrorHostMemoryNotRegistered;
677 case hipErrorDeinitialized:
678 return cudaErrorCudartUnloading;
679 case hipErrorInvalidSymbol:
680 return cudaErrorInvalidSymbol;
681 case hipErrorInsufficientDriver:
682 return cudaErrorInsufficientDriver;
683 case hipErrorMissingConfiguration:
684 return cudaErrorMissingConfiguration;
685 case hipErrorPriorLaunchFailure:
686 return cudaErrorPriorLaunchFailure;
687 case hipErrorInvalidDeviceFunction:
688 return cudaErrorInvalidDeviceFunction;
689 case hipErrorInvalidImage:
690 return cudaErrorInvalidKernelImage;
692 #if CUDA_VERSION >= 10020
693 return cudaErrorDeviceUninitialized;
695 return cudaErrorUnknown;
697 case hipErrorMapFailed:
698 return cudaErrorMapBufferObjectFailed;
699 case hipErrorUnmapFailed:
700 return cudaErrorUnmapBufferObjectFailed;
701 case hipErrorArrayIsMapped:
702 #if CUDA_VERSION >= 10010
703 return cudaErrorArrayIsMapped;
705 return cudaErrorUnknown;
707 case hipErrorAlreadyMapped:
708 #if CUDA_VERSION >= 10010
709 return cudaErrorAlreadyMapped;
711 return cudaErrorUnknown;
713 case hipErrorNoBinaryForGpu:
714 return cudaErrorNoKernelImageForDevice;
715 case hipErrorAlreadyAcquired:
716 #if CUDA_VERSION >= 10010
717 return cudaErrorAlreadyAcquired;
719 return cudaErrorUnknown;
721 case hipErrorNotMapped:
722 #if CUDA_VERSION >= 10010
723 return cudaErrorNotMapped;
725 return cudaErrorUnknown;
727 case hipErrorNotMappedAsArray:
728 #if CUDA_VERSION >= 10010
729 return cudaErrorNotMappedAsArray;
731 return cudaErrorUnknown;
733 case hipErrorNotMappedAsPointer:
734 #if CUDA_VERSION >= 10010
735 return cudaErrorNotMappedAsPointer;
737 return cudaErrorUnknown;
739 case hipErrorECCNotCorrectable:
740 return cudaErrorECCUncorrectable;
741 case hipErrorUnsupportedLimit:
742 return cudaErrorUnsupportedLimit;
743 case hipErrorContextAlreadyInUse:
744 return cudaErrorDeviceAlreadyInUse;
745 case hipErrorPeerAccessUnsupported:
746 return cudaErrorPeerAccessUnsupported;
748 return cudaErrorInvalidPtx;
749 case hipErrorInvalidGraphicsContext:
750 return cudaErrorInvalidGraphicsContext;
751 case hipErrorInvalidSource:
752 #if CUDA_VERSION >= 10010
753 return cudaErrorInvalidSource;
755 return cudaErrorUnknown;
757 case hipErrorFileNotFound:
758 #if CUDA_VERSION >= 10010
759 return cudaErrorFileNotFound;
761 return cudaErrorUnknown;
763 case hipErrorSharedObjectSymbolNotFound:
764 return cudaErrorSharedObjectSymbolNotFound;
765 case hipErrorSharedObjectInitFailed:
766 return cudaErrorSharedObjectInitFailed;
767 case hipErrorOperatingSystem:
768 return cudaErrorOperatingSystem;
769 case hipErrorNotFound:
770 #if CUDA_VERSION >= 10010
771 return cudaErrorSymbolNotFound;
773 return cudaErrorUnknown;
775 case hipErrorIllegalAddress:
776 return cudaErrorIllegalAddress;
777 case hipErrorLaunchTimeOut:
778 return cudaErrorLaunchTimeout;
779 case hipErrorSetOnActiveProcess:
780 return cudaErrorSetOnActiveProcess;
782 return cudaErrorLaunchFailure;
784 return cudaErrorCooperativeLaunchTooLarge;
786 return cudaErrorNotSupported;
791 case hipErrorUnknown:
794 return cudaErrorUnknown;
798 inline static enum cudaMemcpyKind hipMemcpyKindToCudaMemcpyKind(hipMemcpyKind kind) {
800 case hipMemcpyHostToHost:
801 return cudaMemcpyHostToHost;
802 case hipMemcpyHostToDevice:
803 return cudaMemcpyHostToDevice;
804 case hipMemcpyDeviceToHost:
805 return cudaMemcpyDeviceToHost;
806 case hipMemcpyDeviceToDevice:
807 return cudaMemcpyDeviceToDevice;
809 return cudaMemcpyDefault;
813 inline static enum cudaTextureAddressMode hipTextureAddressModeToCudaTextureAddressMode(
814 hipTextureAddressMode kind) {
816 case hipAddressModeWrap:
817 return cudaAddressModeWrap;
818 case hipAddressModeClamp:
819 return cudaAddressModeClamp;
820 case hipAddressModeMirror:
821 return cudaAddressModeMirror;
822 case hipAddressModeBorder:
823 return cudaAddressModeBorder;
825 return cudaAddressModeWrap;
829 inline static enum cudaMemRangeAttribute hipMemRangeAttributeTocudaMemRangeAttribute(
833 return cudaMemRangeAttributeReadMostly;
835 return cudaMemRangeAttributePreferredLocation;
837 return cudaMemRangeAttributeAccessedBy;
839 return cudaMemRangeAttributeLastPrefetchLocation;
841 return cudaMemRangeAttributeReadMostly;
845 inline static enum cudaMemoryAdvise hipMemoryAdviseTocudaMemoryAdvise(
849 return cudaMemAdviseSetReadMostly;
851 return cudaMemAdviseUnsetReadMostly ;
853 return cudaMemAdviseSetPreferredLocation;
855 return cudaMemAdviseUnsetPreferredLocation;
857 return cudaMemAdviseSetAccessedBy;
859 return cudaMemAdviseUnsetAccessedBy;
861 return cudaMemAdviseSetReadMostly;
865 inline static enum cudaTextureFilterMode hipTextureFilterModeToCudaTextureFilterMode(
866 hipTextureFilterMode kind) {
868 case hipFilterModePoint:
869 return cudaFilterModePoint;
870 case hipFilterModeLinear:
871 return cudaFilterModeLinear;
873 return cudaFilterModePoint;
877 inline static enum cudaTextureReadMode hipTextureReadModeToCudaTextureReadMode(hipTextureReadMode kind) {
879 case hipReadModeElementType:
880 return cudaReadModeElementType;
881 case hipReadModeNormalizedFloat:
882 return cudaReadModeNormalizedFloat;
884 return cudaReadModeElementType;
888 inline static enum cudaChannelFormatKind hipChannelFormatKindToCudaChannelFormatKind(
889 hipChannelFormatKind kind) {
891 case hipChannelFormatKindSigned:
892 return cudaChannelFormatKindSigned;
893 case hipChannelFormatKindUnsigned:
894 return cudaChannelFormatKindUnsigned;
895 case hipChannelFormatKindFloat:
896 return cudaChannelFormatKindFloat;
897 case hipChannelFormatKindNone:
898 return cudaChannelFormatKindNone;
900 return cudaChannelFormatKindNone;
907 #define HIPRT_CB CUDART_CB
908 typedef void(HIPRT_CB*
hipStreamCallback_t)(hipStream_t stream, hipError_t status,
void* userData);
909 inline static hipError_t
hipInit(
unsigned int flags) {
910 return hipCUResultTohipError(cuInit(flags));
913 inline static hipError_t
hipDeviceReset() {
return hipCUDAErrorTohipError(cudaDeviceReset()); }
915 inline static hipError_t
hipGetLastError() {
return hipCUDAErrorTohipError(cudaGetLastError()); }
918 return hipCUDAErrorTohipError(cudaPeekAtLastError());
921 inline static hipError_t
hipMalloc(
void** ptr,
size_t size) {
922 return hipCUDAErrorTohipError(cudaMalloc(ptr, size));
925 inline static hipError_t
hipMallocPitch(
void** ptr,
size_t* pitch,
size_t width,
size_t height) {
926 return hipCUDAErrorTohipError(cudaMallocPitch(ptr, pitch, width, height));
929 inline static hipError_t
hipMemAllocPitch(hipDeviceptr_t* dptr,
size_t* pitch,
size_t widthInBytes,
size_t height,
unsigned int elementSizeBytes){
930 return hipCUResultTohipError(cuMemAllocPitch(dptr,pitch,widthInBytes,height,elementSizeBytes));
934 return hipCUDAErrorTohipError(cudaMalloc3D(pitchedDevPtr, extent));
937 inline static hipError_t
hipFree(
void* ptr) {
return hipCUDAErrorTohipError(cudaFree(ptr)); }
939 inline static hipError_t
hipMallocHost(
void** ptr,
size_t size)
940 __attribute__((deprecated(
"use hipHostMalloc instead")));
942 return hipCUDAErrorTohipError(cudaMallocHost(ptr, size));
946 __attribute__((deprecated(
"use hipHostMalloc instead")));
948 return hipCUResultTohipError(cuMemAllocHost(ptr, size));
951 inline static hipError_t
hipHostAlloc(
void** ptr,
size_t size,
unsigned int flags)
952 __attribute__((deprecated(
"use hipHostMalloc instead")));
953 inline static hipError_t
hipHostAlloc(
void** ptr,
size_t size,
unsigned int flags) {
954 return hipCUDAErrorTohipError(cudaHostAlloc(ptr, size, flags));
957 inline static hipError_t
hipHostMalloc(
void** ptr,
size_t size,
unsigned int flags) {
958 return hipCUDAErrorTohipError(cudaHostAlloc(ptr, size, flags));
963 return hipCUDAErrorTohipError(cudaMemAdvise(dev_ptr, count,
964 hipMemoryAdviseTocudaMemoryAdvise(advice), device));
967 inline static hipError_t
hipMemPrefetchAsync(
const void* dev_ptr,
size_t count,
int device,
968 hipStream_t stream __dparm(0)) {
969 return hipCUDAErrorTohipError(cudaMemPrefetchAsync(dev_ptr, count, device, stream));
974 const void* dev_ptr,
size_t count) {
975 return hipCUDAErrorTohipError(cudaMemRangeGetAttribute(data, data_size,
976 hipMemRangeAttributeTocudaMemRangeAttribute(attribute), dev_ptr, count));
981 size_t num_attributes,
const void* dev_ptr,
983 auto attrs = hipMemRangeAttributeTocudaMemRangeAttribute(*attributes);
984 return hipCUDAErrorTohipError(cudaMemRangeGetAttributes(data, data_sizes, &attrs,
985 num_attributes, dev_ptr, count));
989 size_t length __dparm(0),
991 return hipCUDAErrorTohipError(cudaStreamAttachMemAsync(stream, dev_ptr, length, flags));
994 inline static hipError_t
hipMallocManaged(
void** ptr,
size_t size,
unsigned int flags) {
995 return hipCUDAErrorTohipError(cudaMallocManaged(ptr, size, flags));
999 size_t width,
size_t height,
1001 return hipCUDAErrorTohipError(cudaMallocArray(array, desc, width, height, flags));
1006 return hipCUDAErrorTohipError(cudaMalloc3DArray(array, desc, extent, flags));
1010 return hipCUDAErrorTohipError(cudaFreeArray(array));
1014 return hipCUDAErrorTohipError(cudaHostGetDevicePointer(devPtr, hostPtr, flags));
1017 inline static hipError_t
hipHostGetFlags(
unsigned int* flagsPtr,
void* hostPtr) {
1018 return hipCUDAErrorTohipError(cudaHostGetFlags(flagsPtr, hostPtr));
1021 inline static hipError_t
hipHostRegister(
void* ptr,
size_t size,
unsigned int flags) {
1022 return hipCUDAErrorTohipError(cudaHostRegister(ptr, size, flags));
1026 return hipCUDAErrorTohipError(cudaHostUnregister(ptr));
1030 __attribute__((deprecated(
"use hipHostFree instead")));
1032 return hipCUDAErrorTohipError(cudaFreeHost(ptr));
1036 return hipCUDAErrorTohipError(cudaFreeHost(ptr));
1040 return hipCUDAErrorTohipError(cudaSetDevice(device));
1044 struct cudaDeviceProp cdprop;
1045 memset(&cdprop, 0x0,
sizeof(
struct cudaDeviceProp));
1046 cdprop.major = prop->
major;
1047 cdprop.minor = prop->
minor;
1062 return hipCUDAErrorTohipError(cudaChooseDevice(device, &cdprop));
1065 inline static hipError_t
hipMemcpyHtoD(hipDeviceptr_t dst,
void* src,
size_t size) {
1066 return hipCUResultTohipError(cuMemcpyHtoD(dst, src, size));
1069 inline static hipError_t
hipMemcpyDtoH(
void* dst, hipDeviceptr_t src,
size_t size) {
1070 return hipCUResultTohipError(cuMemcpyDtoH(dst, src, size));
1073 inline static hipError_t
hipMemcpyDtoD(hipDeviceptr_t dst, hipDeviceptr_t src,
size_t size) {
1074 return hipCUResultTohipError(cuMemcpyDtoD(dst, src, size));
1077 inline static hipError_t
hipMemcpyHtoDAsync(hipDeviceptr_t dst,
void* src,
size_t size,
1078 hipStream_t stream) {
1079 return hipCUResultTohipError(cuMemcpyHtoDAsync(dst, src, size, stream));
1082 inline static hipError_t
hipMemcpyDtoHAsync(
void* dst, hipDeviceptr_t src,
size_t size,
1083 hipStream_t stream) {
1084 return hipCUResultTohipError(cuMemcpyDtoHAsync(dst, src, size, stream));
1087 inline static hipError_t
hipMemcpyDtoDAsync(hipDeviceptr_t dst, hipDeviceptr_t src,
size_t size,
1088 hipStream_t stream) {
1089 return hipCUResultTohipError(cuMemcpyDtoDAsync(dst, src, size, stream));
1092 inline static hipError_t
hipMemcpy(
void* dst,
const void* src,
size_t sizeBytes,
1093 hipMemcpyKind copyKind) {
1094 return hipCUDAErrorTohipError(
1095 cudaMemcpy(dst, src, sizeBytes, hipMemcpyKindToCudaMemcpyKind(copyKind)));
1099 inline static hipError_t hipMemcpyWithStream(
void* dst,
const void* src,
1100 size_t sizeBytes, hipMemcpyKind copyKind,
1101 hipStream_t stream) {
1102 cudaError_t error = cudaMemcpyAsync(dst, src, sizeBytes,
1103 hipMemcpyKindToCudaMemcpyKind(copyKind),
1106 if (error != cudaSuccess)
return hipCUDAErrorTohipError(error);
1108 return hipCUDAErrorTohipError(cudaStreamSynchronize(stream));
1111 inline static hipError_t
hipMemcpyAsync(
void* dst,
const void* src,
size_t sizeBytes,
1112 hipMemcpyKind copyKind, hipStream_t stream __dparm(0)) {
1113 return hipCUDAErrorTohipError(
1114 cudaMemcpyAsync(dst, src, sizeBytes, hipMemcpyKindToCudaMemcpyKind(copyKind), stream));
1117 inline static hipError_t hipMemcpyToSymbol(
const void* symbol,
const void* src,
size_t sizeBytes,
1118 size_t offset __dparm(0),
1119 hipMemcpyKind copyType __dparm(hipMemcpyHostToDevice)) {
1120 return hipCUDAErrorTohipError(cudaMemcpyToSymbol(symbol, src, sizeBytes, offset,
1121 hipMemcpyKindToCudaMemcpyKind(copyType)));
1124 inline static hipError_t hipMemcpyToSymbolAsync(
const void* symbol,
const void* src,
1125 size_t sizeBytes,
size_t offset,
1126 hipMemcpyKind copyType,
1127 hipStream_t stream __dparm(0)) {
1128 return hipCUDAErrorTohipError(cudaMemcpyToSymbolAsync(
1129 symbol, src, sizeBytes, offset, hipMemcpyKindToCudaMemcpyKind(copyType), stream));
1132 inline static hipError_t hipMemcpyFromSymbol(
void* dst,
const void* symbolName,
size_t sizeBytes,
1133 size_t offset __dparm(0),
1134 hipMemcpyKind kind __dparm(hipMemcpyDeviceToHost)) {
1135 return hipCUDAErrorTohipError(cudaMemcpyFromSymbol(dst, symbolName, sizeBytes, offset,
1136 hipMemcpyKindToCudaMemcpyKind(kind)));
1139 inline static hipError_t hipMemcpyFromSymbolAsync(
void* dst,
const void* symbolName,
1140 size_t sizeBytes,
size_t offset,
1142 hipStream_t stream __dparm(0)) {
1143 return hipCUDAErrorTohipError(cudaMemcpyFromSymbolAsync(
1144 dst, symbolName, sizeBytes, offset, hipMemcpyKindToCudaMemcpyKind(kind), stream));
1147 inline static hipError_t hipGetSymbolAddress(
void** devPtr,
const void* symbolName) {
1148 return hipCUDAErrorTohipError(cudaGetSymbolAddress(devPtr, symbolName));
1151 inline static hipError_t hipGetSymbolSize(
size_t* size,
const void* symbolName) {
1152 return hipCUDAErrorTohipError(cudaGetSymbolSize(size, symbolName));
1155 inline static hipError_t
hipMemcpy2D(
void* dst,
size_t dpitch,
const void* src,
size_t spitch,
1156 size_t width,
size_t height, hipMemcpyKind kind) {
1157 return hipCUDAErrorTohipError(
1158 cudaMemcpy2D(dst, dpitch, src, spitch, width, height, hipMemcpyKindToCudaMemcpyKind(kind)));
1162 return hipCUResultTohipError(cuMemcpy2D(pCopy));
1166 return hipCUResultTohipError(cuMemcpy2DAsync(pCopy, stream));
1170 return hipCUDAErrorTohipError(cudaMemcpy3D(p));
1174 return hipCUDAErrorTohipError(cudaMemcpy3DAsync(p, stream));
1178 return hipCUResultTohipError(cuMemcpy3D(pCopy));
1182 return hipCUResultTohipError(cuMemcpy3DAsync(pCopy, stream));
1185 inline static hipError_t
hipMemcpy2DAsync(
void* dst,
size_t dpitch,
const void* src,
size_t spitch,
1186 size_t width,
size_t height, hipMemcpyKind kind,
1187 hipStream_t stream) {
1188 return hipCUDAErrorTohipError(cudaMemcpy2DAsync(dst, dpitch, src, spitch, width, height,
1189 hipMemcpyKindToCudaMemcpyKind(kind), stream));
1193 size_t wOffset,
size_t hOffset,
size_t width,
1194 size_t height, hipMemcpyKind kind) {
1195 return hipCUDAErrorTohipError(cudaMemcpy2DFromArray(dst, dpitch, src, wOffset, hOffset, width,
1197 hipMemcpyKindToCudaMemcpyKind(kind)));
1201 size_t wOffset,
size_t hOffset,
size_t width,
1202 size_t height, hipMemcpyKind kind,
1203 hipStream_t stream) {
1204 return hipCUDAErrorTohipError(cudaMemcpy2DFromArrayAsync(dst, dpitch, src, wOffset, hOffset,
1206 hipMemcpyKindToCudaMemcpyKind(kind),
1211 const void* src,
size_t spitch,
size_t width,
1212 size_t height, hipMemcpyKind kind) {
1213 return hipCUDAErrorTohipError(cudaMemcpy2DToArray(dst, wOffset, hOffset, src, spitch, width,
1214 height, hipMemcpyKindToCudaMemcpyKind(kind)));
1217 inline static hipError_t hipMemcpy2DToArrayAsync(
hipArray* dst,
size_t wOffset,
size_t hOffset,
1218 const void* src,
size_t spitch,
size_t width,
1219 size_t height, hipMemcpyKind kind,
1220 hipStream_t stream) {
1221 return hipCUDAErrorTohipError(cudaMemcpy2DToArrayAsync(dst, wOffset, hOffset, src, spitch,
1223 hipMemcpyKindToCudaMemcpyKind(kind),
1228 size_t hOffset,
const void* src,
1229 size_t count, hipMemcpyKind kind) {
1230 return hipCUDAErrorTohipError(
1231 cudaMemcpyToArray(dst, wOffset, hOffset, src, count, hipMemcpyKindToCudaMemcpyKind(kind)));
1235 size_t wOffset,
size_t hOffset,
1236 size_t count, hipMemcpyKind kind) {
1237 return hipCUDAErrorTohipError(cudaMemcpyFromArray(dst, srcArray, wOffset, hOffset, count,
1238 hipMemcpyKindToCudaMemcpyKind(kind)));
1243 return hipCUResultTohipError(cuMemcpyAtoH(dst, (CUarray)srcArray, srcOffset, count));
1248 return hipCUResultTohipError(cuMemcpyHtoA((CUarray)dstArray, dstOffset, srcHost, count));
1252 return hipCUDAErrorTohipError(cudaDeviceSynchronize());
1256 return hipCUDAErrorTohipError(cudaDeviceGetCacheConfig(pCacheConfig));
1260 return hipCUDAErrorTohipError(cudaFuncSetAttribute(func, attr, value));
1264 return hipCUDAErrorTohipError(cudaDeviceSetCacheConfig(cacheConfig));
1268 return hipCUDAErrorTohipError(cudaFuncSetSharedMemConfig(func, config));
1272 return cudaGetErrorString(hipErrorToCudaError(error));
1276 return cudaGetErrorName(hipErrorToCudaError(error));
1280 return hipCUDAErrorTohipError(cudaGetDeviceCount(count));
1284 return hipCUDAErrorTohipError(cudaGetDevice(device));
1288 return hipCUDAErrorTohipError(cudaIpcCloseMemHandle(devPtr));
1291 inline static hipError_t hipIpcGetEventHandle(
hipIpcEventHandle_t* handle, hipEvent_t event) {
1292 return hipCUDAErrorTohipError(cudaIpcGetEventHandle(handle, event));
1296 return hipCUDAErrorTohipError(cudaIpcGetMemHandle(handle, devPtr));
1299 inline static hipError_t hipIpcOpenEventHandle(hipEvent_t* event,
hipIpcEventHandle_t handle) {
1300 return hipCUDAErrorTohipError(cudaIpcOpenEventHandle(event, handle));
1304 unsigned int flags) {
1305 return hipCUDAErrorTohipError(cudaIpcOpenMemHandle(devPtr, handle, flags));
1308 inline static hipError_t
hipMemset(
void* devPtr,
int value,
size_t count) {
1309 return hipCUDAErrorTohipError(cudaMemset(devPtr, value, count));
1312 inline static hipError_t
hipMemsetD32(hipDeviceptr_t devPtr,
int value,
size_t count) {
1313 return hipCUResultTohipError(cuMemsetD32(devPtr, value, count));
1316 inline static hipError_t
hipMemsetAsync(
void* devPtr,
int value,
size_t count,
1317 hipStream_t stream __dparm(0)) {
1318 return hipCUDAErrorTohipError(cudaMemsetAsync(devPtr, value, count, stream));
1321 inline static hipError_t
hipMemsetD32Async(hipDeviceptr_t devPtr,
int value,
size_t count,
1322 hipStream_t stream __dparm(0)) {
1323 return hipCUResultTohipError(cuMemsetD32Async(devPtr, value, count, stream));
1326 inline static hipError_t
hipMemsetD8(hipDeviceptr_t dest,
unsigned char value,
size_t sizeBytes) {
1327 return hipCUResultTohipError(cuMemsetD8(dest, value, sizeBytes));
1330 inline static hipError_t
hipMemsetD8Async(hipDeviceptr_t dest,
unsigned char value,
size_t sizeBytes,
1331 hipStream_t stream __dparm(0)) {
1332 return hipCUResultTohipError(cuMemsetD8Async(dest, value, sizeBytes, stream));
1335 inline static hipError_t
hipMemsetD16(hipDeviceptr_t dest,
unsigned short value,
size_t sizeBytes) {
1336 return hipCUResultTohipError(cuMemsetD16(dest, value, sizeBytes));
1339 inline static hipError_t
hipMemsetD16Async(hipDeviceptr_t dest,
unsigned short value,
size_t sizeBytes,
1340 hipStream_t stream __dparm(0)) {
1341 return hipCUResultTohipError(cuMemsetD16Async(dest, value, sizeBytes, stream));
1344 inline static hipError_t
hipMemset2D(
void* dst,
size_t pitch,
int value,
size_t width,
size_t height) {
1345 return hipCUDAErrorTohipError(cudaMemset2D(dst, pitch, value, width, height));
1348 inline static hipError_t
hipMemset2DAsync(
void* dst,
size_t pitch,
int value,
size_t width,
size_t height, hipStream_t stream __dparm(0)) {
1349 return hipCUDAErrorTohipError(cudaMemset2DAsync(dst, pitch, value, width, height, stream));
1353 return hipCUDAErrorTohipError(cudaMemset3D(pitchedDevPtr, value, extent));
1357 return hipCUDAErrorTohipError(cudaMemset3DAsync(pitchedDevPtr, value, extent, stream));
1361 struct cudaDeviceProp cdprop;
1363 cerror = cudaGetDeviceProperties(&cdprop, device);
1365 strncpy(p_prop->
name, cdprop.name, 256);
1369 p_prop->
warpSize = cdprop.warpSize;
1371 for (
int i = 0; i < 3; i++) {
1379 p_prop->
major = cdprop.major;
1380 p_prop->
minor = cdprop.minor;
1387 int ccVers = p_prop->
major * 100 + p_prop->
minor * 10;
1408 p_prop->
pciBusID = cdprop.pciBusID;
1429 p_prop->
memPitch = cdprop.memPitch;
1436 return hipCUDAErrorTohipError(cerror);
1440 enum cudaDeviceAttr cdattr;
1445 cdattr = cudaDevAttrMaxThreadsPerBlock;
1448 cdattr = cudaDevAttrMaxBlockDimX;
1451 cdattr = cudaDevAttrMaxBlockDimY;
1454 cdattr = cudaDevAttrMaxBlockDimZ;
1457 cdattr = cudaDevAttrMaxGridDimX;
1460 cdattr = cudaDevAttrMaxGridDimY;
1463 cdattr = cudaDevAttrMaxGridDimZ;
1466 cdattr = cudaDevAttrMaxSharedMemoryPerBlock;
1469 cdattr = cudaDevAttrTotalConstantMemory;
1472 cdattr = cudaDevAttrWarpSize;
1475 cdattr = cudaDevAttrMaxRegistersPerBlock;
1478 cdattr = cudaDevAttrClockRate;
1481 cdattr = cudaDevAttrMemoryClockRate;
1484 cdattr = cudaDevAttrGlobalMemoryBusWidth;
1487 cdattr = cudaDevAttrMultiProcessorCount;
1490 cdattr = cudaDevAttrComputeMode;
1493 cdattr = cudaDevAttrL2CacheSize;
1496 cdattr = cudaDevAttrMaxThreadsPerMultiProcessor;
1499 cdattr = cudaDevAttrComputeCapabilityMajor;
1502 cdattr = cudaDevAttrComputeCapabilityMinor;
1505 cdattr = cudaDevAttrConcurrentKernels;
1508 cdattr = cudaDevAttrPciBusId;
1511 cdattr = cudaDevAttrPciDeviceId;
1514 cdattr = cudaDevAttrMaxSharedMemoryPerMultiprocessor;
1517 cdattr = cudaDevAttrIsMultiGpuBoard;
1520 cdattr = cudaDevAttrIntegrated;
1523 cdattr = cudaDevAttrMaxTexture1DWidth;
1526 cdattr = cudaDevAttrMaxTexture2DWidth;
1529 cdattr = cudaDevAttrMaxTexture2DHeight;
1532 cdattr = cudaDevAttrMaxTexture3DWidth;
1535 cdattr = cudaDevAttrMaxTexture3DHeight;
1538 cdattr = cudaDevAttrMaxTexture3DDepth;
1541 cdattr = cudaDevAttrMaxPitch;
1544 cdattr = cudaDevAttrTextureAlignment;
1547 cdattr = cudaDevAttrTexturePitchAlignment;
1550 cdattr = cudaDevAttrKernelExecTimeout;
1553 cdattr = cudaDevAttrCanMapHostMemory;
1556 cdattr = cudaDevAttrEccEnabled;
1559 cdattr = cudaDevAttrCooperativeLaunch;
1562 cdattr = cudaDevAttrCooperativeMultiDeviceLaunch;
1565 return hipCUDAErrorTohipError(cudaErrorInvalidValue);
1568 cerror = cudaDeviceGetAttribute(pi, cdattr, device);
1570 return hipCUDAErrorTohipError(cerror);
1576 size_t dynamicSMemSize) {
1577 return hipCUDAErrorTohipError(cudaOccupancyMaxActiveBlocksPerMultiprocessor(numBlocks, func,
1578 blockSize, dynamicSMemSize));
1584 size_t dynamicSMemSize,
1585 unsigned int flags) {
1586 return hipCUDAErrorTohipError(cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, func,
1587 blockSize, dynamicSMemSize, flags));
1593 size_t dynamicSMemSize ){
1594 return hipCUResultTohipError(cuOccupancyMaxActiveBlocksPerMultiprocessor(numBlocks, f,
1595 blockSize, dynamicSMemSize));
1601 size_t dynamicSMemSize,
1602 unsigned int flags ) {
1603 return hipCUResultTohipError(cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks,f,
1604 blockSize, dynamicSMemSize, flags));
1609 hipFunction_t f,
size_t dynSharedMemPerBlk,
1610 int blockSizeLimit){
1611 return hipCUResultTohipError(cuOccupancyMaxPotentialBlockSize(gridSize, blockSize, f, NULL,
1612 dynSharedMemPerBlk, blockSizeLimit));
1617 hipFunction_t f,
size_t dynSharedMemPerBlk,
1618 int blockSizeLimit,
unsigned int flags){
1619 return hipCUResultTohipError(cuOccupancyMaxPotentialBlockSizeWithFlags(gridSize, blockSize, f, NULL,
1620 dynSharedMemPerBlk, blockSizeLimit, flags));
1624 struct cudaPointerAttributes cPA;
1625 hipError_t err = hipCUDAErrorTohipError(cudaPointerGetAttributes(&cPA, ptr));
1627 #if (CUDART_VERSION >= 11000)
1628 auto memType = cPA.type;
1630 unsigned memType = cPA.memoryType;
1633 case cudaMemoryTypeDevice:
1636 case cudaMemoryTypeHost:
1640 return hipErrorUnknown;
1642 attributes->device = cPA.device;
1643 attributes->devicePointer = cPA.devicePointer;
1644 attributes->hostPointer = cPA.hostPointer;
1645 attributes->isManaged = 0;
1646 attributes->allocationFlags = 0;
1651 inline static hipError_t
hipMemGetInfo(
size_t* free,
size_t* total) {
1652 return hipCUDAErrorTohipError(cudaMemGetInfo(free, total));
1656 return hipCUDAErrorTohipError(cudaEventCreate(event));
1659 inline static hipError_t
hipEventRecord(hipEvent_t event, hipStream_t stream __dparm(NULL)) {
1660 return hipCUDAErrorTohipError(cudaEventRecord(event, stream));
1664 return hipCUDAErrorTohipError(cudaEventSynchronize(event));
1667 inline static hipError_t
hipEventElapsedTime(
float* ms, hipEvent_t start, hipEvent_t stop) {
1668 return hipCUDAErrorTohipError(cudaEventElapsedTime(ms, start, stop));
1672 return hipCUDAErrorTohipError(cudaEventDestroy(event));
1676 return hipCUDAErrorTohipError(cudaStreamCreateWithFlags(stream, flags));
1680 return hipCUDAErrorTohipError(cudaStreamCreateWithPriority(stream, flags, priority));
1684 return hipCUDAErrorTohipError(cudaDeviceGetStreamPriorityRange(leastPriority, greatestPriority));
1688 return hipCUDAErrorTohipError(cudaStreamCreate(stream));
1692 return hipCUDAErrorTohipError(cudaStreamSynchronize(stream));
1696 return hipCUDAErrorTohipError(cudaStreamDestroy(stream));
1699 inline static hipError_t
hipStreamGetFlags(hipStream_t stream,
unsigned int *flags) {
1700 return hipCUDAErrorTohipError(cudaStreamGetFlags(stream, flags));
1704 return hipCUDAErrorTohipError(cudaStreamGetPriority(stream, priority));
1708 unsigned int flags) {
1709 return hipCUDAErrorTohipError(cudaStreamWaitEvent(stream, event, flags));
1713 return hipCUDAErrorTohipError(cudaStreamQuery(stream));
1717 void* userData,
unsigned int flags) {
1718 return hipCUDAErrorTohipError(
1719 cudaStreamAddCallback(stream, (cudaStreamCallback_t)callback, userData, flags));
1723 cudaError_t err = cudaDriverGetVersion(driverVersion);
1728 return hipCUDAErrorTohipError(err);
1732 return hipCUDAErrorTohipError(cudaRuntimeGetVersion(runtimeVersion));
1736 return hipCUDAErrorTohipError(cudaDeviceCanAccessPeer(canAccessPeer, device, peerDevice));
1740 return hipCUDAErrorTohipError(cudaDeviceDisablePeerAccess(peerDevice));
1744 return hipCUDAErrorTohipError(cudaDeviceEnablePeerAccess(peerDevice, flags));
1748 return hipCUResultTohipError(cuCtxDisablePeerAccess(peerCtx));
1752 return hipCUResultTohipError(cuCtxEnablePeerAccess(peerCtx, flags));
1757 return hipCUResultTohipError(cuDevicePrimaryCtxGetState(dev, flags, active));
1761 return hipCUResultTohipError(cuDevicePrimaryCtxRelease(dev));
1765 return hipCUResultTohipError(cuDevicePrimaryCtxRetain(pctx, dev));
1769 return hipCUResultTohipError(cuDevicePrimaryCtxReset(dev));
1773 return hipCUResultTohipError(cuDevicePrimaryCtxSetFlags(dev, flags));
1777 hipDeviceptr_t dptr) {
1778 return hipCUResultTohipError(cuMemGetAddressRange(pbase, psize, dptr));
1781 inline static hipError_t
hipMemcpyPeer(
void* dst,
int dstDevice,
const void* src,
int srcDevice,
1783 return hipCUDAErrorTohipError(cudaMemcpyPeer(dst, dstDevice, src, srcDevice, count));
1786 inline static hipError_t
hipMemcpyPeerAsync(
void* dst,
int dstDevice,
const void* src,
1787 int srcDevice,
size_t count,
1788 hipStream_t stream __dparm(0)) {
1789 return hipCUDAErrorTohipError(
1790 cudaMemcpyPeerAsync(dst, dstDevice, src, srcDevice, count, stream));
1794 inline static hipError_t
hipProfilerStart() {
return hipCUDAErrorTohipError(cudaProfilerStart()); }
1796 inline static hipError_t
hipProfilerStop() {
return hipCUDAErrorTohipError(cudaProfilerStop()); }
1799 return hipCUDAErrorTohipError(cudaGetDeviceFlags(flags));
1803 return hipCUDAErrorTohipError(cudaSetDeviceFlags(flags));
1807 return hipCUDAErrorTohipError(cudaEventCreateWithFlags(event, flags));
1811 return hipCUDAErrorTohipError(cudaEventQuery(event));
1814 inline static hipError_t
hipCtxCreate(hipCtx_t* ctx,
unsigned int flags, hipDevice_t device) {
1815 return hipCUResultTohipError(cuCtxCreate(ctx, flags, device));
1819 return hipCUResultTohipError(cuCtxDestroy(ctx));
1823 return hipCUResultTohipError(cuCtxPopCurrent(ctx));
1827 return hipCUResultTohipError(cuCtxPushCurrent(ctx));
1831 return hipCUResultTohipError(cuCtxSetCurrent(ctx));
1835 return hipCUResultTohipError(cuCtxGetCurrent(ctx));
1839 return hipCUResultTohipError(cuCtxGetDevice(device));
1843 return hipCUResultTohipError(cuCtxGetApiVersion(ctx, (
unsigned int*)apiVersion));
1847 return hipCUResultTohipError(cuCtxGetCacheConfig(cacheConfig));
1851 return hipCUResultTohipError(cuCtxSetCacheConfig(cacheConfig));
1855 return hipCUResultTohipError(cuCtxSetSharedMemConfig((CUsharedconfig)config));
1859 return hipCUResultTohipError(cuCtxGetSharedMemConfig((CUsharedconfig*)pConfig));
1863 return hipCUResultTohipError(cuCtxSynchronize());
1867 return hipCUResultTohipError(cuCtxGetFlags(flags));
1870 inline static hipError_t hipCtxDetach(hipCtx_t ctx) {
1871 return hipCUResultTohipError(cuCtxDetach(ctx));
1874 inline static hipError_t
hipDeviceGet(hipDevice_t* device,
int ordinal) {
1875 return hipCUResultTohipError(cuDeviceGet(device, ordinal));
1879 return hipCUResultTohipError(cuDeviceComputeCapability(major, minor, device));
1882 inline static hipError_t
hipDeviceGetName(
char* name,
int len, hipDevice_t device) {
1883 return hipCUResultTohipError(cuDeviceGetName(name, len, device));
1887 int srcDevice,
int dstDevice) {
1888 return hipCUDAErrorTohipError(cudaDeviceGetP2PAttribute(value, attr, srcDevice, dstDevice));
1892 return hipCUDAErrorTohipError(cudaDeviceGetPCIBusId(pciBusId, len, device));
1896 return hipCUDAErrorTohipError(cudaDeviceGetByPCIBusId(device, pciBusId));
1900 return hipCUDAErrorTohipError(cudaDeviceGetSharedMemConfig(config));
1904 return hipCUDAErrorTohipError(cudaDeviceSetSharedMemConfig(config));
1908 return hipCUDAErrorTohipError(cudaDeviceGetLimit(pValue, limit));
1912 return hipCUResultTohipError(cuDeviceTotalMem(bytes, device));
1915 inline static hipError_t
hipModuleLoad(hipModule_t* module,
const char* fname) {
1916 return hipCUResultTohipError(cuModuleLoad(module, fname));
1920 return hipCUResultTohipError(cuModuleUnload(hmod));
1924 const char* kname) {
1925 return hipCUResultTohipError(cuModuleGetFunction(
function, module, kname));
1928 inline static hipError_t
hipModuleGetTexRef(hipTexRef* pTexRef, hipModule_t hmod,
const char* name){
1929 hipCUResultTohipError(cuModuleGetTexRef(pTexRef, hmod, name));
1933 return hipCUDAErrorTohipError(cudaFuncGetAttributes(attr, func));
1936 inline static hipError_t
hipFuncGetAttribute (
int* value, hipFunction_attribute attrib, hipFunction_t hfunc) {
1937 return hipCUResultTohipError(cuFuncGetAttribute(value, attrib, hfunc));
1940 inline static hipError_t hipModuleGetGlobal(hipDeviceptr_t* dptr,
size_t* bytes, hipModule_t hmod,
1942 return hipCUResultTohipError(cuModuleGetGlobal(dptr, bytes, hmod, name));
1945 inline static hipError_t
hipModuleLoadData(hipModule_t* module,
const void* image) {
1946 return hipCUResultTohipError(cuModuleLoadData(module, image));
1950 unsigned int numOptions, hipJitOption* options,
1951 void** optionValues) {
1952 return hipCUResultTohipError(
1953 cuModuleLoadDataEx(module, image, numOptions, options, optionValues));
1957 dim3 dimBlocks,
void** args,
size_t sharedMemBytes,
1960 return hipCUDAErrorTohipError(cudaLaunchKernel(function_address,numBlocks,dimBlocks,args,sharedMemBytes,stream));
1964 unsigned int gridDimY,
unsigned int gridDimZ,
1965 unsigned int blockDimX,
unsigned int blockDimY,
1966 unsigned int blockDimZ,
unsigned int sharedMemBytes,
1967 hipStream_t stream,
void** kernelParams,
1969 return hipCUResultTohipError(cuLaunchKernel(f, gridDimX, gridDimY, gridDimZ, blockDimX,
1970 blockDimY, blockDimZ, sharedMemBytes, stream,
1971 kernelParams, extra));
1975 return hipCUDAErrorTohipError(cudaFuncSetCacheConfig(func, cacheConfig));
1978 __HIP_DEPRECATED
inline static hipError_t hipBindTexture(
size_t* offset,
1982 size_t size __dparm(UINT_MAX)) {
1983 return hipCUDAErrorTohipError(cudaBindTexture(offset, tex, devPtr, desc, size));
1986 __HIP_DEPRECATED
inline static hipError_t hipBindTexture2D(
1989 return hipCUDAErrorTohipError(cudaBindTexture2D(offset, tex, devPtr, desc, width, height, pitch));
1993 hipChannelFormatKind f) {
1994 return cudaCreateChannelDesc(x, y, z, w, hipChannelFormatKindToCudaChannelFormatKind(f));
1997 inline static hipError_t hipCreateTextureObject(hipTextureObject_t* pTexObject,
2001 return hipCUDAErrorTohipError(
2002 cudaCreateTextureObject(pTexObject, pResDesc, pTexDesc, pResViewDesc));
2005 inline static hipError_t hipDestroyTextureObject(hipTextureObject_t textureObject) {
2006 return hipCUDAErrorTohipError(cudaDestroyTextureObject(textureObject));
2011 return hipCUDAErrorTohipError(cudaCreateSurfaceObject(pSurfObject, pResDesc));
2015 return hipCUDAErrorTohipError(cudaDestroySurfaceObject(surfaceObject));
2018 inline static hipError_t hipGetTextureObjectResourceDesc(
hipResourceDesc* pResDesc,
2019 hipTextureObject_t textureObject) {
2020 return hipCUDAErrorTohipError(cudaGetTextureObjectResourceDesc( pResDesc, textureObject));
2023 __HIP_DEPRECATED
inline static hipError_t hipGetTextureAlignmentOffset(
2025 return hipCUDAErrorTohipError(cudaGetTextureAlignmentOffset(offset,texref));
2030 return hipCUDAErrorTohipError(cudaGetChannelDesc(desc,array));
2034 void** kernelParams,
unsigned int sharedMemBytes,
2035 hipStream_t stream) {
2036 return hipCUDAErrorTohipError(
2037 cudaLaunchCooperativeKernel(f, gridDim, blockDim, kernelParams, sharedMemBytes, stream));
2041 int numDevices,
unsigned int flags) {
2042 return hipCUDAErrorTohipError(cudaLaunchCooperativeKernelMultiDevice(launchParamsList, numDevices, flags));
2055 size_t dynamicSMemSize) {
2056 return hipCUDAErrorTohipError(cudaOccupancyMaxActiveBlocksPerMultiprocessor(numBlocks, func,
2057 blockSize, dynamicSMemSize));
2062 size_t dynamicSMemSize = 0,
2063 int blockSizeLimit = 0) {
2064 return hipCUDAErrorTohipError(cudaOccupancyMaxPotentialBlockSize(minGridSize, blockSize, func,
2065 dynamicSMemSize, blockSizeLimit));
2069 inline static hipError_t hipOccupancyMaxPotentialBlockSizeWithFlags(
int* minGridSize,
int* blockSize, T func,
2070 size_t dynamicSMemSize = 0,
2071 int blockSizeLimit = 0,
unsigned int flags = 0) {
2072 return hipCUDAErrorTohipError(cudaOccupancyMaxPotentialBlockSize(minGridSize, blockSize, func,
2073 dynamicSMemSize, blockSizeLimit, flags));
2078 int blockSize,
size_t dynamicSMemSize,
unsigned int flags) {
2079 return hipCUDAErrorTohipError(cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(numBlocks, func,
2080 blockSize, dynamicSMemSize, flags));
2083 template <
class T,
int dim, enum cudaTextureReadMode readMode>
2084 inline static hipError_t hipBindTexture(
size_t* offset,
const struct texture<T, dim, readMode>& tex,
2085 const void* devPtr,
size_t size = UINT_MAX) {
2086 return hipCUDAErrorTohipError(cudaBindTexture(offset, tex, devPtr, size));
2089 template <
class T,
int dim, enum cudaTextureReadMode readMode>
2090 inline static hipError_t hipBindTexture(
size_t* offset,
struct texture<T, dim, readMode>& tex,
2092 size_t size = UINT_MAX) {
2093 return hipCUDAErrorTohipError(cudaBindTexture(offset, tex, devPtr, desc, size));
2096 template <
class T,
int dim, enum cudaTextureReadMode readMode>
2097 __HIP_DEPRECATED
inline static hipError_t hipUnbindTexture(
struct texture<T, dim, readMode>* tex) {
2098 return hipCUDAErrorTohipError(cudaUnbindTexture(tex));
2101 template <
class T,
int dim, enum cudaTextureReadMode readMode>
2102 __HIP_DEPRECATED
inline static hipError_t hipUnbindTexture(
struct texture<T, dim, readMode>& tex) {
2103 return hipCUDAErrorTohipError(cudaUnbindTexture(tex));
2106 template <
class T,
int dim, enum cudaTextureReadMode readMode>
2107 __HIP_DEPRECATED
inline static hipError_t hipBindTextureToArray(
2110 return hipCUDAErrorTohipError(cudaBindTextureToArray(tex, array, desc));
2113 template <
class T,
int dim, enum cudaTextureReadMode readMode>
2114 __HIP_DEPRECATED
inline static hipError_t hipBindTextureToArray(
2117 return hipCUDAErrorTohipError(cudaBindTextureToArray(tex, array, desc));
2120 template <
class T,
int dim, enum cudaTextureReadMode readMode>
2121 __HIP_DEPRECATED
inline static hipError_t hipBindTextureToArray(
2123 return hipCUDAErrorTohipError(cudaBindTextureToArray(tex, array));
2128 return cudaCreateChannelDesc<T>();
2133 void** kernelParams,
unsigned int sharedMemBytes, hipStream_t stream) {
2134 return hipCUDAErrorTohipError(
2135 cudaLaunchCooperativeKernel(
reinterpret_cast<const void*
>(f), gridDim, blockDim, kernelParams, sharedMemBytes, stream));
2138 inline static hipError_t hipTexRefSetAddressMode(hipTexRef hTexRef,
int dim, hipAddress_mode am){
2139 return hipCUResultTohipError(cuTexRefSetAddressMode(hTexRef,dim,am));
2142 inline static hipError_t hipTexRefSetFilterMode(hipTexRef hTexRef, hipFilter_mode fm){
2143 return hipCUResultTohipError(cuTexRefSetFilterMode(hTexRef,fm));
2146 inline static hipError_t hipTexRefSetAddress(
size_t *ByteOffset, hipTexRef hTexRef, hipDeviceptr_t dptr,
size_t bytes){
2147 return hipCUResultTohipError(cuTexRefSetAddress(ByteOffset,hTexRef,dptr,bytes));
2150 inline static hipError_t hipTexRefSetAddress2D(hipTexRef hTexRef,
const CUDA_ARRAY_DESCRIPTOR *desc, hipDeviceptr_t dptr,
size_t Pitch){
2151 return hipCUResultTohipError(cuTexRefSetAddress2D(hTexRef,desc,dptr,Pitch));
2154 inline static hipError_t hipTexRefSetFormat(hipTexRef hTexRef, hipArray_Format fmt,
int NumPackedComponents){
2155 return hipCUResultTohipError(cuTexRefSetFormat(hTexRef,fmt,NumPackedComponents));
2158 inline static hipError_t hipTexRefSetFlags(hipTexRef hTexRef,
unsigned int Flags){
2159 return hipCUResultTohipError(cuTexRefSetFlags(hTexRef,Flags));
2162 inline static hipError_t hipTexRefSetArray(hipTexRef hTexRef,
hiparray hArray,
unsigned int Flags){
2163 return hipCUResultTohipError(cuTexRefSetArray(hTexRef,hArray,Flags));
2167 return hipCUResultTohipError(cuArrayCreate(pHandle, pAllocateArray));
2170 inline static hipError_t hipArrayDestroy(
hiparray hArray){
2171 return hipCUResultTohipError(cuArrayDestroy(hArray));
2174 inline static hipError_t hipArray3DCreate(
hiparray* pHandle,
2176 return hipCUResultTohipError(cuArray3DCreate(pHandle, pAllocateArray));
2181 #endif // HIP_INCLUDE_HIP_NVIDIA_DETAIL_HIP_RUNTIME_API_H