Commit 8d7b35e

[CUDA] Remove obsolete GPU-side __constexpr_* wrappers. (#139164)
1 parent a4186bd

File tree

  • clang/lib/Headers/cuda_wrappers

1 file changed: +0 -50 lines
clang/lib/Headers/cuda_wrappers/cmath

Lines changed: 0 additions & 50 deletions
@@ -39,56 +39,6 @@
 __attribute__((device)) long double logb(long double);
 __attribute__((device)) long double scalbn(long double, int);
 
-namespace std {
-
-// For __constexpr_fmin/fmax we only need device-side overloads before c++14
-// where they are not constexpr.
-#if _LIBCPP_STD_VER < 14
-
-__attribute__((device))
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 float __constexpr_fmax(float __x, float __y) _NOEXCEPT {
-  return __builtin_fmaxf(__x, __y);
-}
-
-__attribute__((device))
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 double __constexpr_fmax(double __x, double __y) _NOEXCEPT {
-  return __builtin_fmax(__x, __y);
-}
-
-__attribute__((device))
-inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 long double
-__constexpr_fmax(long double __x, long double __y) _NOEXCEPT {
-  return __builtin_fmaxl(__x, __y);
-}
-
-template <class _Tp, class _Up, __enable_if_t<is_arithmetic<_Tp>::value && is_arithmetic<_Up>::value, int> = 0>
-__attribute__((device))
-_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename __promote<_Tp, _Up>::type
-__constexpr_fmax(_Tp __x, _Up __y) _NOEXCEPT {
-  using __result_type = typename __promote<_Tp, _Up>::type;
-  return std::__constexpr_fmax(static_cast<__result_type>(__x), static_cast<__result_type>(__y));
-}
-#endif // _LIBCPP_STD_VER < 14
-
-// For logb/scalbn templates we must always provide device overloads because
-// libc++ implementation uses __builtin_XXX which gets translated into a libcall
-// which we can't handle on GPU. We need to forward those to CUDA-provided
-// implementations.
-
-template <class _Tp>
-__attribute__((device))
-_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __constexpr_logb(_Tp __x) {
-  return ::logb(__x);
-}
-
-template <class _Tp>
-__attribute__((device))
-_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Tp __constexpr_scalbn(_Tp __x, int __exp) {
-  return ::scalbn(__x, __exp);
-}
-
-} // namespace std//
-
 #endif // _LIBCPP_STD_VER
 
 #endif // include guard
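For context, the deleted block implemented the two patterns its own comments describe: pre-C++14 device-side overloads of __constexpr_fmax that forward to compiler builtins, and logb/scalbn forwarders that defer to the CUDA-provided implementations, because __builtin_logb/__builtin_scalbn lower to libcalls the GPU backend cannot resolve. Below is a minimal sketch of that forwarding pattern, assuming clang CUDA compilation; the demo namespace, the device_* names, and the kernel are hypothetical illustrations, not the libc++ originals.

// A minimal sketch of the removed forwarding pattern (hypothetical names,
// not the libc++ originals), compiled as clang CUDA.
#include <cmath>

namespace demo {

// Pre-C++14, std::fmax is not constexpr, so a plain device overload
// forwarding to the compiler builtin stood in for it on the GPU side.
__attribute__((device)) inline float device_fmax(float x, float y) {
  return __builtin_fmaxf(x, y);
}

// logb/scalbn must not use __builtin_logb/__builtin_scalbn on the device:
// those lower to libcalls the GPU backend cannot resolve. The wrappers
// therefore defer to the CUDA-provided ::logb/::scalbn overloads.
template <class T>
__attribute__((device)) T device_logb(T x) {
  return ::logb(x);
}

template <class T>
__attribute__((device)) T device_scalbn(T x, int exp) {
  return ::scalbn(x, exp);
}

} // namespace demo

// Hypothetical kernel exercising the forwarders on the device.
__attribute__((global)) void kernel(double *out, double in) {
  out[0] = demo::device_fmax(static_cast<float>(in), 1.0f);
  out[1] = demo::device_logb(in);
  out[2] = demo::device_scalbn(in, 3);
}

Per the commit title, these GPU-side forwarders are now obsolete: current libc++ no longer needs them on the device, so the commit deletes the whole block rather than updating it.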
