#ifndef HIP_INCLUDE_HIP_HCC_DETAIL_HIP_RUNTIME_H
#define HIP_INCLUDE_HIP_HCC_DETAIL_HIP_RUNTIME_H

#include <hip/hcc_detail/hip_common.h>
#ifndef __HIP_ENABLE_DEVICE_MALLOC__
#define __HIP_ENABLE_DEVICE_MALLOC__ 0
#endif  // __HIP_ENABLE_DEVICE_MALLOC__
#if __HCC_OR_HIP_CLANG__

#if !defined(__align__)
#define __align__(x) __attribute__((aligned(x)))
#endif  // !defined(__align__)

#define CUDA_SUCCESS hipSuccess

#endif  // __HCC_OR_HIP_CLANG__
// Define HIP_ENABLE_PRINTF to enable printf in device code.
#ifdef HIP_ENABLE_PRINTF
#define HCC_ENABLE_ACCELERATOR_PRINTF 1
#endif  // HIP_ENABLE_PRINTF
#include "grid_launch.h"
#include "hc_printf.hpp"
#if GENERIC_GRID_LAUNCH == 0
#define hipLaunchParm grid_launch_parm
#else
namespace hip_impl {
struct Empty_launch_parm {};
}  // namespace hip_impl
#define hipLaunchParm hip_impl::Empty_launch_parm
#endif  // GENERIC_GRID_LAUNCH
#if defined(GRID_LAUNCH_VERSION) and (GRID_LAUNCH_VERSION >= 20) || GENERIC_GRID_LAUNCH == 1
// OK: HCC supports the grid_launch 2.0 structure with field names.
#else
#error(HCC must support GRID_LAUNCH_20)
#endif  // GRID_LAUNCH_VERSION
#if GENERIC_GRID_LAUNCH == 1 && defined __HCC__
#include "grid_launch_GGL.hpp"
#endif  // GENERIC_GRID_LAUNCH
#if __HCC_OR_HIP_CLANG__
extern int HIP_TRACE_API;
#include <hip/hcc_detail/hip_ldg.h>
#include <hip/hcc_detail/hip_atomic.h>
#include <hip/hcc_detail/device_functions.h>
#include <hip/hcc_detail/surface_functions.h>
#include <hip/hcc_detail/math_functions.h>
#include <hip/hcc_detail/texture_functions.h>
#include <hip/hcc_detail/texture_fetch_functions.h>
#include <hip/hcc_detail/texture_indirect_functions.h>
#if defined(__KALMAR_ACCELERATOR__) && !defined(__HCC_ACCELERATOR__)
#define __HCC_ACCELERATOR__ __KALMAR_ACCELERATOR__
#endif  // __KALMAR_ACCELERATOR__
// Device-side assert: abort the kernel when the condition fails.
#if defined(__HCC__) && __HIP_DEVICE_COMPILE__ == 1
#undef assert
#define assert(COND)                                                                               \
    {                                                                                              \
        if (!(COND)) {                                                                             \
            abort();                                                                               \
        }                                                                                          \
    }
#endif
// Feature tests (defined only for device-side compilation):
#if (defined(__HCC_ACCELERATOR__) && (__HCC_ACCELERATOR__ != 0)) || __HIP_DEVICE_COMPILE__

// 32-bit atomics:
#define __HIP_ARCH_HAS_GLOBAL_INT32_ATOMICS__ (1)
#define __HIP_ARCH_HAS_GLOBAL_FLOAT_ATOMIC_EXCH__ (1)
#define __HIP_ARCH_HAS_SHARED_INT32_ATOMICS__ (1)
#define __HIP_ARCH_HAS_SHARED_FLOAT_ATOMIC_EXCH__ (1)
#define __HIP_ARCH_HAS_FLOAT_ATOMIC_ADD__ (1)

// 64-bit atomics:
#define __HIP_ARCH_HAS_GLOBAL_INT64_ATOMICS__ (1)
#define __HIP_ARCH_HAS_SHARED_INT64_ATOMICS__ (0)

// Doubles:
#define __HIP_ARCH_HAS_DOUBLES__ (1)

// Warp cross-lane operations:
#define __HIP_ARCH_HAS_WARP_VOTE__ (1)
#define __HIP_ARCH_HAS_WARP_BALLOT__ (1)
#define __HIP_ARCH_HAS_WARP_SHUFFLE__ (1)
#define __HIP_ARCH_HAS_WARP_FUNNEL_SHIFT__ (0)

// Synchronization:
#define __HIP_ARCH_HAS_THREAD_FENCE_SYSTEM__ (1)
#define __HIP_ARCH_HAS_SYNC_THREAD_EXT__ (0)

// Miscellaneous:
#define __HIP_ARCH_HAS_SURFACE_FUNCS__ (0)
#define __HIP_ARCH_HAS_3DGRID__ (1)
#define __HIP_ARCH_HAS_DYNAMIC_PARALLEL__ (0)

#endif  // Device feature flags
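// Illustrative sketch (hypothetical guard, not part of this header): since the
// flags expand to (1) or (0), device code can branch on them at preprocessing
// time.
//
//   #if __HIP_ARCH_HAS_WARP_SHUFFLE__
//       val += __shfl_down(val, 16);
//   #endif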
#define launch_bounds_impl0(requiredMaxThreadsPerBlock)                                            \
    __attribute__((amdgpu_flat_work_group_size(1, requiredMaxThreadsPerBlock)))
#define launch_bounds_impl1(requiredMaxThreadsPerBlock, minBlocksPerMultiprocessor)                \
    __attribute__((amdgpu_flat_work_group_size(1, requiredMaxThreadsPerBlock),                     \
                   amdgpu_waves_per_eu(minBlocksPerMultiprocessor)))
#define select_impl_(_1, _2, impl_, ...) impl_
#define __launch_bounds__(...)                                                                     \
    select_impl_(__VA_ARGS__, launch_bounds_impl1, launch_bounds_impl0)(__VA_ARGS__)
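// Illustrative usage sketch (kernel name is hypothetical): the one-argument
// form caps the block size; the optional second argument additionally requests
// a minimum number of waves per execution unit.
//
//   __global__ void __launch_bounds__(256, 2) scale_kernel(float* p) {
//       p[hipThreadIdx_x] *= 2.0f;
//   }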
// Detect whether we are compiling in C++ or C mode:
#if defined(__cplusplus)
#define __HCC_CPP__
#elif defined(__STDC_VERSION__)
#define __HCC_C__
#endif

__host__ inline void* __get_dynamicgroupbaseptr() { return nullptr; }
#if __HIP_ARCH_GFX701__ == 0

__device__ unsigned __hip_ds_bpermute(int index, unsigned src);
__device__ float __hip_ds_bpermutef(int index, float src);
__device__ unsigned __hip_ds_permute(int index, unsigned src);
__device__ float __hip_ds_permutef(int index, float src);

template <int pattern>
__device__ unsigned __hip_ds_swizzle_N(unsigned int src);
template <int pattern>
__device__ float __hip_ds_swizzlef_N(float src);

template <int dpp_ctrl, int row_mask, int bank_mask, bool bound_ctrl>
__device__ int __hip_move_dpp_N(int src);
#endif  // __HIP_ARCH_GFX701__ == 0
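// Illustrative sketch (hypothetical helper; index scaling is an assumption
// about the byte-addressed lane index of ds_bpermute): the swizzle/DPP control
// words are template parameters so they can be encoded into the instruction.
//
//   __device__ unsigned broadcast_from_lane(unsigned v, int lane) {
//       return __hip_ds_bpermute(lane * 4, v);
//   }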
#endif  // __HCC_OR_HIP_CLANG__
#if defined __HCC__

namespace hip_impl {
struct GroupId {
    using R = decltype(hc_get_group_id(0));

    __device__ R operator()(std::uint32_t x) const noexcept { return hc_get_group_id(x); }
};
struct GroupSize {
    using R = decltype(hc_get_group_size(0));

    __device__ R operator()(std::uint32_t x) const noexcept { return hc_get_group_size(x); }
};
struct NumGroups {
    using R = decltype(hc_get_num_groups(0));

    __device__ R operator()(std::uint32_t x) const noexcept { return hc_get_num_groups(x); }
};
struct WorkitemId {
    using R = decltype(hc_get_workitem_id(0));

    __device__ R operator()(std::uint32_t x) const noexcept { return hc_get_workitem_id(x); }
};
}  // namespace hip_impl

template <typename F>
struct Coordinates {
    using R = decltype(F{}(0));

    struct X { __device__ operator R() const noexcept { return F{}(0); } };
    struct Y { __device__ operator R() const noexcept { return F{}(1); } };
    struct Z { __device__ operator R() const noexcept { return F{}(2); } };

    static constexpr X x{};
    static constexpr Y y{};
    static constexpr Z z{};
};
// gridDim * blockDim (in either operand order) folds to the grid size.
inline std::uint32_t operator*(Coordinates<hip_impl::NumGroups>::X,
                               Coordinates<hip_impl::GroupSize>::X) noexcept {
    return hc_get_grid_size(0);
}
inline std::uint32_t operator*(Coordinates<hip_impl::GroupSize>::X,
                               Coordinates<hip_impl::NumGroups>::X) noexcept {
    return hc_get_grid_size(0);
}
inline std::uint32_t operator*(Coordinates<hip_impl::NumGroups>::Y,
                               Coordinates<hip_impl::GroupSize>::Y) noexcept {
    return hc_get_grid_size(1);
}
inline std::uint32_t operator*(Coordinates<hip_impl::GroupSize>::Y,
                               Coordinates<hip_impl::NumGroups>::Y) noexcept {
    return hc_get_grid_size(1);
}
inline std::uint32_t operator*(Coordinates<hip_impl::NumGroups>::Z,
                               Coordinates<hip_impl::GroupSize>::Z) noexcept {
    return hc_get_grid_size(2);
}
inline std::uint32_t operator*(Coordinates<hip_impl::GroupSize>::Z,
                               Coordinates<hip_impl::NumGroups>::Z) noexcept {
    return hc_get_grid_size(2);
}

static constexpr Coordinates<hip_impl::GroupSize> blockDim{};
static constexpr Coordinates<hip_impl::GroupId> blockIdx{};
static constexpr Coordinates<hip_impl::NumGroups> gridDim{};
static constexpr Coordinates<hip_impl::WorkitemId> threadIdx{};
#define hipThreadIdx_x (hc_get_workitem_id(0))
#define hipThreadIdx_y (hc_get_workitem_id(1))
#define hipThreadIdx_z (hc_get_workitem_id(2))

#define hipBlockIdx_x (hc_get_group_id(0))
#define hipBlockIdx_y (hc_get_group_id(1))
#define hipBlockIdx_z (hc_get_group_id(2))

#define hipBlockDim_x (hc_get_group_size(0))
#define hipBlockDim_y (hc_get_group_size(1))
#define hipBlockDim_z (hc_get_group_size(2))

#define hipGridDim_x (hc_get_num_groups(0))
#define hipGridDim_y (hc_get_num_groups(1))
#define hipGridDim_z (hc_get_num_groups(2))
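// Illustrative sketch (hypothetical kernel): computing a global 1D index from
// these macros.
//
//   __global__ void scale(float* v, float s, size_t n) {
//       size_t i = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x;
//       if (i < n) v[i] *= s;
//   }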
#endif  // defined __HCC__
#if __HCC_OR_HIP_CLANG__
#if !__CLANG_HIP_RUNTIME_WRAPPER_INCLUDED__
#if __HIP_ENABLE_DEVICE_MALLOC__
extern "C" __device__ void* __hip_malloc(size_t);
extern "C" __device__ void* __hip_free(void* ptr);
static inline __device__ void* malloc(size_t size) { return __hip_malloc(size); }
static inline __device__ void* free(void* ptr) { return __hip_free(ptr); }
#else
// Device-side malloc/free are disabled: trap if they are ever called.
static inline __device__ void* malloc(size_t size) { __builtin_trap(); return nullptr; }
static inline __device__ void* free(void* ptr) { __builtin_trap(); return nullptr; }
#endif  // __HIP_ENABLE_DEVICE_MALLOC__
#endif  // !__CLANG_HIP_RUNTIME_WRAPPER_INCLUDED__
#endif  // __HCC_OR_HIP_CLANG__
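// Illustrative sketch (hypothetical kernel; requires building with
// __HIP_ENABLE_DEVICE_MALLOC__ set to 1, otherwise malloc traps):
//
//   __global__ void scratch_demo() {
//       int* tmp = static_cast<int*>(malloc(4 * sizeof(int)));
//       if (tmp) { tmp[0] = 1; free(tmp); }
//   }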
#define __syncthreads() hc_barrier(CLK_LOCAL_MEM_FENCE)

#define HIP_KERNEL_NAME(...) (__VA_ARGS__)
#define HIP_SYMBOL(X) #X
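// Illustrative sketch (hypothetical templated kernel): HIP_KERNEL_NAME keeps
// comma-containing kernel names intact through macro expansion.
//
//   hipLaunchKernel(HIP_KERNEL_NAME(copy_kernel<int, 4>), dim3(blocks), dim3(threads),
//                   0, 0, dst, src);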
#if defined __HCC_CPP__
// C++ launch path: wrap the kernel call with the pre/post launch hooks.
#if GENERIC_GRID_LAUNCH == 0
#define hipLaunchKernel(_kernelName, _numBlocks3D, _blockDim3D, _groupMemBytes, _stream, ...)      \
    do {                                                                                           \
        grid_launch_parm lp;                                                                       \
        lp.dynamic_group_mem_bytes = _groupMemBytes;                                               \
        hipStream_t trueStream =                                                                   \
            (ihipPreLaunchKernel(_stream, _numBlocks3D, _blockDim3D, &lp, #_kernelName));          \
        _kernelName(lp, ##__VA_ARGS__);                                                            \
        ihipPostLaunchKernel(#_kernelName, trueStream, lp);                                        \
    } while (0)
#endif  // GENERIC_GRID_LAUNCH
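// Illustrative usage sketch (hypothetical kernel whose first formal parameter
// is hipLaunchParm, per the GENERIC_GRID_LAUNCH == 0 convention):
//
//   hipLaunchKernel(my_kernel, dim3(64), dim3(256), 0 /*groupMemBytes*/,
//                   0 /*stream*/, dev_ptr, n);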
#elif defined(__HCC_C__)
#elif defined(__clang__) && defined(__HIP__)

#define HIP_KERNEL_NAME(...) __VA_ARGS__
#define HIP_SYMBOL(X) X

typedef int hipLaunchParm;
// Recursively pack the address of each kernel argument into _vargs.
template <std::size_t n, typename... Ts,
          typename std::enable_if<n == sizeof...(Ts)>::type* = nullptr>
void pArgs(const std::tuple<Ts...>&, void*) {}

template <std::size_t n, typename... Ts,
          typename std::enable_if<n != sizeof...(Ts)>::type* = nullptr>
void pArgs(const std::tuple<Ts...>& formals, void** _vargs) {
    using T = typename std::tuple_element<n, std::tuple<Ts...> >::type;

    static_assert(!std::is_reference<T>{},
                  "A __global__ function cannot have a reference as one of its "
                  "arguments.");
#if defined(HIP_STRICT)
    static_assert(std::is_trivially_copyable<T>{},
                  "Only TriviallyCopyable types can be arguments to a __global__ "
                  "function");
#endif
    _vargs[n] = const_cast<void*>(reinterpret_cast<const void*>(&std::get<n>(formals)));
    return pArgs<n + 1>(formals, _vargs);
}
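// For example (illustrative): with formals (int, float), pArgs<0> stores the
// address of std::get<0>(formals), recurses to pArgs<1>, and the
// n == sizeof...(Ts) overload terminates the recursion.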
template <typename... Formals, typename... Actuals>
std::tuple<Formals...> validateArgsCountType(void (*kernel)(Formals...),
                                             std::tuple<Actuals...>(actuals)) {
    static_assert(sizeof...(Formals) == sizeof...(Actuals), "Argument Count Mismatch");

    std::tuple<Formals...> to_formals{std::move(actuals)};
    return to_formals;
}
#if defined(HIP_TEMPLATE_KERNEL_LAUNCH)
template <typename... Args, typename F = void (*)(Args...)>
void hipLaunchKernelGGL(F kernel, const dim3& numBlocks, const dim3& dimBlocks,
                        std::uint32_t sharedMemBytes, hipStream_t stream, Args... args) {
    constexpr size_t count = sizeof...(Args);
    auto tup_ = std::tuple<Args...>{args...};
    auto tup = validateArgsCountType(kernel, tup_);
    void* _Args[count];
    pArgs<0>(tup, _Args);

    auto k = reinterpret_cast<void*>(kernel);
    hipLaunchKernel(k, numBlocks, dimBlocks, _Args, sharedMemBytes, stream);
}
#else
#define hipLaunchKernelGGLInternal(kernelName, numBlocks, numThreads, memPerBlock, streamId, ...)  \
    do {                                                                                           \
        kernelName<<<(numBlocks), (numThreads), (memPerBlock), (streamId)>>>(__VA_ARGS__);         \
    } while (0)

#define hipLaunchKernelGGL(kernelName, ...) hipLaunchKernelGGLInternal((kernelName), __VA_ARGS__)
#endif  // HIP_TEMPLATE_KERNEL_LAUNCH
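// Illustrative usage sketch (hypothetical kernel and device buffers):
//
//   __global__ void vadd(const float* a, const float* b, float* c, size_t n) {
//       size_t i = hipBlockIdx_x * hipBlockDim_x + hipThreadIdx_x;
//       if (i < n) c[i] = a[i] + b[i];
//   }
//   // ...
//   hipLaunchKernelGGL(vadd, dim3((n + 255) / 256), dim3(256), 0, 0, a, b, c, n);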
extern "C" __device__ __attribute__((const)) size_t __ockl_get_local_id(uint);
extern "C" __device__ __attribute__((const)) size_t __ockl_get_group_id(uint);
extern "C" __device__ __attribute__((const)) size_t __ockl_get_local_size(uint);
extern "C" __device__ __attribute__((const)) size_t __ockl_get_num_groups(uint);

struct __HIP_BlockIdx {
    __device__
    std::uint32_t operator()(std::uint32_t x) const noexcept { return __ockl_get_group_id(x); }
};
struct __HIP_BlockDim {
    __device__
    std::uint32_t operator()(std::uint32_t x) const noexcept { return __ockl_get_local_size(x); }
};
struct __HIP_GridDim {
    __device__
    std::uint32_t operator()(std::uint32_t x) const noexcept { return __ockl_get_num_groups(x); }
};
struct __HIP_ThreadIdx {
    __device__
    std::uint32_t operator()(std::uint32_t x) const noexcept { return __ockl_get_local_id(x); }
};
template <typename F>
struct __HIP_Coordinates {
    using R = decltype(F{}(0));

    struct X { __device__ operator R() const noexcept { return F{}(0); } };
    struct Y { __device__ operator R() const noexcept { return F{}(1); } };
    struct Z { __device__ operator R() const noexcept { return F{}(2); } };

    static constexpr X x{};
    static constexpr Y y{};
    static constexpr Z z{};

    __device__ operator dim3() const { return dim3(x, y, z); }
};
// Out-of-line definitions for x, y, z (weak, so multiple TUs can link):
template <typename F>
#if !defined(_MSC_VER)
__attribute__((weak))
#endif
constexpr typename __HIP_Coordinates<F>::X __HIP_Coordinates<F>::x;
template <typename F>
#if !defined(_MSC_VER)
__attribute__((weak))
#endif
constexpr typename __HIP_Coordinates<F>::Y __HIP_Coordinates<F>::y;
template <typename F>
#if !defined(_MSC_VER)
__attribute__((weak))
#endif
constexpr typename __HIP_Coordinates<F>::Z __HIP_Coordinates<F>::z;
extern "C" __device__ __attribute__((const)) size_t __ockl_get_global_size(uint);

// gridDim * blockDim (in either operand order) folds to the global size.
inline __device__ std::uint32_t operator*(__HIP_Coordinates<__HIP_GridDim>::X,
                                          __HIP_Coordinates<__HIP_BlockDim>::X) noexcept {
    return __ockl_get_global_size(0);
}
inline __device__ std::uint32_t operator*(__HIP_Coordinates<__HIP_BlockDim>::X,
                                          __HIP_Coordinates<__HIP_GridDim>::X) noexcept {
    return __ockl_get_global_size(0);
}
inline __device__ std::uint32_t operator*(__HIP_Coordinates<__HIP_GridDim>::Y,
                                          __HIP_Coordinates<__HIP_BlockDim>::Y) noexcept {
    return __ockl_get_global_size(1);
}
inline __device__ std::uint32_t operator*(__HIP_Coordinates<__HIP_BlockDim>::Y,
                                          __HIP_Coordinates<__HIP_GridDim>::Y) noexcept {
    return __ockl_get_global_size(1);
}
inline __device__ std::uint32_t operator*(__HIP_Coordinates<__HIP_GridDim>::Z,
                                          __HIP_Coordinates<__HIP_BlockDim>::Z) noexcept {
    return __ockl_get_global_size(2);
}
inline __device__ std::uint32_t operator*(__HIP_Coordinates<__HIP_BlockDim>::Z,
                                          __HIP_Coordinates<__HIP_GridDim>::Z) noexcept {
    return __ockl_get_global_size(2);
}
static constexpr __HIP_Coordinates<__HIP_BlockDim> blockDim{};
static constexpr __HIP_Coordinates<__HIP_BlockIdx> blockIdx{};
static constexpr __HIP_Coordinates<__HIP_GridDim> gridDim{};
static constexpr __HIP_Coordinates<__HIP_ThreadIdx> threadIdx{};
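// Illustrative sketch (hypothetical kernel): via the operator* overloads
// above, gridDim.x * blockDim.x lowers to a single __ockl_get_global_size(0)
// call in a grid-stride loop.
//
//   __global__ void fill(float* p, float v, size_t n) {
//       for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
//            i += gridDim.x * blockDim.x)
//           p[i] = v;
//   }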
extern "C" __device__ __attribute__((const)) size_t __ockl_get_local_id(uint);
#define hipThreadIdx_x (__ockl_get_local_id(0))
#define hipThreadIdx_y (__ockl_get_local_id(1))
#define hipThreadIdx_z (__ockl_get_local_id(2))

extern "C" __device__ __attribute__((const)) size_t __ockl_get_group_id(uint);
#define hipBlockIdx_x (__ockl_get_group_id(0))
#define hipBlockIdx_y (__ockl_get_group_id(1))
#define hipBlockIdx_z (__ockl_get_group_id(2))

extern "C" __device__ __attribute__((const)) size_t __ockl_get_local_size(uint);
#define hipBlockDim_x (__ockl_get_local_size(0))
#define hipBlockDim_y (__ockl_get_local_size(1))
#define hipBlockDim_z (__ockl_get_local_size(2))

extern "C" __device__ __attribute__((const)) size_t __ockl_get_num_groups(uint);
#define hipGridDim_x (__ockl_get_num_groups(0))
#define hipGridDim_y (__ockl_get_num_groups(1))
#define hipGridDim_z (__ockl_get_num_groups(2))
#include <hip/hcc_detail/math_functions.h>
#if __HIP_HCC_COMPAT_MODE__
// Define HCC work-item functions in terms of the HIP builtin variables.
#pragma push_macro("__DEFINE_HCC_FUNC")
#define __DEFINE_HCC_FUNC(hc_fun,hip_var) \
inline __device__ __attribute__((always_inline)) uint hc_get_##hc_fun(uint i) { \
    if (i == 0)      \
        return hip_var.x; \
    else if (i == 1) \
        return hip_var.y; \
    else             \
        return hip_var.z; \
}

__DEFINE_HCC_FUNC(workitem_id, threadIdx)
__DEFINE_HCC_FUNC(group_id, blockIdx)
__DEFINE_HCC_FUNC(group_size, blockDim)
__DEFINE_HCC_FUNC(num_groups, gridDim)
#pragma pop_macro("__DEFINE_HCC_FUNC")
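// For example (illustrative): after the expansions above, hc_get_workitem_id(0)
// returns threadIdx.x and hc_get_group_size(1) returns blockDim.y.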
extern "C" __device__ __attribute__((const)) size_t __ockl_get_global_id(uint);
inline __device__ __attribute__((always_inline)) uint
hc_get_workitem_absolute_id(int dim)
{
    return (uint)__ockl_get_global_id(dim);
}
#endif  // __HIP_HCC_COMPAT_MODE__
#if !__CLANG_HIP_RUNTIME_WRAPPER_INCLUDED__
// Pull in clang's CUDA header wrappers so C++ library facilities work in
// device code.
#if !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
#pragma push_macro("__CUDA__")
#define __CUDA__
#include <__clang_cuda_math_forward_declares.h>
#include <__clang_cuda_complex_builtins.h>
#include <include/cuda_wrappers/algorithm>
#include <include/cuda_wrappers/complex>
#include <include/cuda_wrappers/new>
#undef __CUDA__
#pragma pop_macro("__CUDA__")
#endif  // !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
#endif  // !__CLANG_HIP_RUNTIME_WRAPPER_INCLUDED__
#endif  // defined(__clang__) && defined(__HIP__)

#include <hip/hcc_detail/hip_memory.h>

#endif  // HIP_INCLUDE_HIP_HCC_DETAIL_HIP_RUNTIME_H