Floating <--> fixed-point conversion must now be called explicitly #15438

Merged

20 commits
89c724a
Floating <--> fixed-point conversion must now be called explicitly
pmattione-nvidia Apr 2, 2024
50650c2
Update cpp/include/cudf/unary.hpp
pmattione-nvidia Apr 5, 2024
9cabc8d
For types_casted_writer, add explicit conversion call for decimal -> …
pmattione-nvidia Apr 8, 2024
52d3197
xMerge branch 'fixed_explicit_convert' of https://github.com/pmattion…
pmattione-nvidia Apr 8, 2024
d15e2fb
Fix formatting
pmattione-nvidia Apr 8, 2024
6053d49
Add notes
pmattione-nvidia Apr 9, 2024
e702108
Update cpp/src/binaryop/compiled/binary_ops.cuh
pmattione-nvidia Apr 9, 2024
66a6bd5
Spelling fixes
pmattione-nvidia Apr 9, 2024
2cbe687
Merge branch 'fixed_explicit_convert' of https://github.com/pmattione…
pmattione-nvidia Apr 9, 2024
28ddcd3
Update cpp/include/cudf/unary.hpp
pmattione-nvidia Apr 10, 2024
838c210
Update cpp/include/cudf/unary.hpp
pmattione-nvidia Apr 10, 2024
8d944cc
Update cpp/include/cudf/unary.hpp
pmattione-nvidia Apr 10, 2024
71b3c31
Update cpp/include/cudf/unary.hpp
pmattione-nvidia Apr 10, 2024
c323383
Update cpp/include/cudf/unary.hpp
pmattione-nvidia Apr 10, 2024
dd1ac80
Update cpp/include/cudf/unary.hpp
pmattione-nvidia Apr 10, 2024
70faf4c
Update cpp/include/cudf/unary.hpp
pmattione-nvidia Apr 10, 2024
65f8313
Update cpp/include/cudf/unary.hpp
pmattione-nvidia Apr 10, 2024
89f8a7d
Update cpp/include/cudf/unary.hpp
pmattione-nvidia Apr 10, 2024
c9ca9a5
yet another style change
pmattione-nvidia Apr 10, 2024
ea6de44
Merge branch 'branch-24.06' into fixed_explicit_convert
pmattione-nvidia Apr 10, 2024
49 changes: 4 additions & 45 deletions cpp/include/cudf/fixed_point/fixed_point.hpp
@@ -67,18 +67,6 @@ constexpr inline auto is_supported_representation_type()
cuda::std::is_same_v<T, __int128_t>;
}

/**
* @brief Returns `true` if the value type is supported for constructing a `fixed_point`
*
* @tparam T The construction value type
* @return `true` if the value type is supported to construct a `fixed_point` type
*/
template <typename T>
constexpr inline auto is_supported_construction_value_type()
{
return cuda::std::is_integral<T>() || cuda::std::is_floating_point_v<T>;
}

/** @} */ // end of group

// Helper functions for `fixed_point` type
@@ -222,23 +210,8 @@ class fixed_point {
scale_type _scale;

public:
using rep = Rep; ///< The representation type

/**
* @brief Constructor that will perform shifting to store value appropriately (from floating point
* types)
*
* @tparam T The floating point type that you are constructing from
* @param value The value that will be constructed from
* @param scale The exponent that is applied to Rad to perform shifting
*/
template <typename T,
typename cuda::std::enable_if_t<cuda::std::is_floating_point<T>() &&
is_supported_representation_type<Rep>()>* = nullptr>
CUDF_HOST_DEVICE inline explicit fixed_point(T const& value, scale_type const& scale)
: _value{static_cast<Rep>(detail::shift<Rep, Rad>(value, scale))}, _scale{scale}
{
}
using rep = Rep; ///< The representation type
static constexpr auto rad = Rad; ///< The base

/**
* @brief Constructor that will perform shifting to store value appropriately (from integral
@@ -249,7 +222,7 @@ class fixed_point {
* @param scale The exponent that is applied to Rad to perform shifting
*/
template <typename T,
typename cuda::std::enable_if_t<cuda::std::is_integral<T>() &&
typename cuda::std::enable_if_t<cuda::std::is_integral_v<T> &&
is_supported_representation_type<Rep>()>* = nullptr>
CUDF_HOST_DEVICE inline explicit fixed_point(T const& value, scale_type const& scale)
// `value` is cast to `Rep` to avoid overflow in cases where
@@ -275,8 +248,7 @@ class fixed_point {
* @tparam T The value type being constructing from
* @param value The value that will be constructed from
*/
template <typename T,
typename cuda::std::enable_if_t<is_supported_construction_value_type<T>()>* = nullptr>
template <typename T, typename cuda::std::enable_if_t<cuda::std::is_integral_v<T>>* = nullptr>
CUDF_HOST_DEVICE inline fixed_point(T const& value)
: _value{static_cast<Rep>(value)}, _scale{scale_type{0}}
{
@@ -288,19 +260,6 @@ class fixed_point {
*/
CUDF_HOST_DEVICE inline fixed_point() : _scale{scale_type{0}} {}

/**
* @brief Explicit conversion operator for casting to floating point types
*
* @tparam U The floating point type that is being explicitly converted to
* @return The `fixed_point` number in base 10 (aka human readable format)
*/
template <typename U,
typename cuda::std::enable_if_t<cuda::std::is_floating_point_v<U>>* = nullptr>
explicit constexpr operator U() const
{
return detail::shift<Rep, Rad>(static_cast<U>(_value), scale_type{-_scale});
}

/**
* @brief Explicit conversion operator for casting to integral types
*
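Note on the removal above: with the floating-point constructor and the floating-point conversion operator gone from fixed_point, call sites go through the explicit free functions added to cpp/include/cudf/unary.hpp in the next file. A minimal before/after sketch, assuming decimal64 and a scale of -2 purely for illustration:

#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/unary.hpp>

using numeric::decimal64;
using numeric::scale_type;

void migration_sketch()
{
  // Before this change (no longer compiles):
  //   decimal64 d{1.25, scale_type{-2}};    // floating-point constructor (removed)
  //   auto f = static_cast<double>(d);      // floating-point conversion operator (removed)

  // Now the conversions are explicit:
  auto const d = cudf::convert_floating_to_fixed<decimal64>(1.25, scale_type{-2});
  auto const f = cudf::convert_fixed_to_floating<double>(d);
}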
63 changes: 62 additions & 1 deletion cpp/include/cudf/unary.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
* Copyright (c) 2018-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,8 +16,10 @@

#pragma once

#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/traits.hpp>

#include <rmm/mr/device/per_device_resource.hpp>

@@ -31,6 +33,65 @@ namespace cudf {
* @brief Column APIs for unary ops
*/

/**
* @brief Convert a floating-point value to fixed-point
*
* @tparam Fixed The fixed-point type we are converting to
* @tparam Floating The floating-point type we are converting from
* @param floating The floating-point value to convert
* @param scale The desired scale of the fixed-point value
* @return The converted to fixed-point value
*/
template <typename Fixed,
typename Floating,
typename cuda::std::enable_if_t<is_fixed_point<Fixed>() &&
cuda::std::is_floating_point_v<Floating>>* = nullptr>
CUDF_HOST_DEVICE Fixed convert_floating_to_fixed(Floating floating, numeric::scale_type scale)
{
using Rep = typename Fixed::rep;
auto const shifted = numeric::detail::shift<Rep, Fixed::rad>(floating, scale);
numeric::scaled_integer<Rep> scaled{static_cast<Rep>(shifted), scale};
return Fixed(scaled);
}

/** @brief Convert a fixed-point value to floating-point
*
* @tparam Floating The floating-point type we are converting to
* @tparam Fixed The fixed-point type we are converting from
* @param fixed The fixed-point value to convert
* @return The converted-to floating-point value.
*/
template <typename Floating,
typename Fixed,
typename cuda::std::enable_if_t<cuda::std::is_floating_point_v<Floating> &&
is_fixed_point<Fixed>()>* = nullptr>
CUDF_HOST_DEVICE Floating convert_fixed_to_floating(Fixed fixed)
{
using Rep = typename Fixed::rep;
auto const casted = static_cast<Floating>(fixed.value());
auto const scale = numeric::scale_type{-fixed.scale()};
return numeric::detail::shift<Rep, Fixed::rad>(casted, scale);
}

/** @brief Convert a value to floating-point
*
* @tparam Floating The floating-point type we are converting to
* @tparam Input The input type we are converting from
* @param input The input value to convert
* @return The converted-to floating-point value.
*/
template <typename Floating,
typename Input,
typename cuda::std::enable_if_t<cuda::std::is_floating_point_v<Floating>>* = nullptr>
CUDF_HOST_DEVICE Floating convert_to_floating(Input input)
{
if constexpr (is_fixed_point<Input>()) {
return convert_fixed_to_floating<Floating>(input);
} else {
return static_cast<Floating>(input);
}
}

/**
* @brief Types of unary operations that can be performed on data.
*/
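The generic convert_to_floating helper above is what the quantiles and tdigest call sites further down switch to, since it accepts both fixed-point and plain arithmetic inputs. A small host-side sketch with illustrative values:

#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/unary.hpp>

using numeric::decimal32;
using numeric::scale_type;

void to_floating_sketch()
{
  // 1.75 stored at scale -2, i.e. a representation value of 175
  auto const fp = cudf::convert_floating_to_fixed<decimal32>(1.75, scale_type{-2});

  // Fixed-point input is routed through convert_fixed_to_floating ...
  auto const a = cudf::convert_to_floating<double>(fp);           // ~1.75

  // ... while arithmetic input falls back to a plain static_cast.
  auto const b = cudf::convert_to_floating<double>(int64_t{42});  // 42.0
}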
7 changes: 5 additions & 2 deletions cpp/include/cudf/utilities/traits.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
* Copyright (c) 2019-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -397,7 +397,10 @@ template <typename T>
constexpr inline bool is_fixed_point()
{
return std::is_same_v<numeric::decimal32, T> || std::is_same_v<numeric::decimal64, T> ||
std::is_same_v<numeric::decimal128, T>;
std::is_same_v<numeric::decimal128, T> ||
std::is_same_v<numeric::fixed_point<int32_t, numeric::Radix::BASE_2>, T> ||
std::is_same_v<numeric::fixed_point<int64_t, numeric::Radix::BASE_2>, T> ||
std::is_same_v<numeric::fixed_point<__int128_t, numeric::Radix::BASE_2>, T>;
}

/**
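A compile-time sketch of the widened trait (the static_asserts are illustrative, not part of the diff):

#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/utilities/traits.hpp>

// The decimal aliases were already covered ...
static_assert(cudf::is_fixed_point<numeric::decimal32>());
static_assert(cudf::is_fixed_point<numeric::decimal128>());

// ... and the BASE_2 instantiations are now recognized as well, which lets the
// convert_* helpers in cudf/unary.hpp accept them.
static_assert(cudf::is_fixed_point<numeric::fixed_point<int64_t, numeric::Radix::BASE_2>>());

// Non-fixed-point types are still rejected.
static_assert(not cudf::is_fixed_point<double>());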
19 changes: 12 additions & 7 deletions cpp/src/binaryop/compiled/binary_ops.cuh
@@ -22,6 +22,7 @@
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/unary.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
@@ -69,13 +70,17 @@ struct typed_casted_writer {
if constexpr (mutable_column_device_view::has_element_accessor<Element>() and
std::is_constructible_v<Element, FromType>) {
col.element<Element>(i) = static_cast<Element>(val);
} else if constexpr (is_fixed_point<Element>() and
(is_fixed_point<FromType>() or
std::is_constructible_v<Element, FromType>)) {
if constexpr (is_fixed_point<FromType>())
col.data<Element::rep>()[i] = val.rescaled(numeric::scale_type{col.type().scale()}).value();
else
col.data<Element::rep>()[i] = Element{val, numeric::scale_type{col.type().scale()}}.value();
} else if constexpr (is_fixed_point<Element>()) {
auto scale = numeric::scale_type{col.type().scale()};
if constexpr (is_fixed_point<FromType>()) {
col.data<Element::rep>()[i] = val.rescaled(scale).value();
} else if constexpr (cuda::std::is_constructible_v<Element, FromType>) {
col.data<Element::rep>()[i] = Element{val, scale}.value();
} else if constexpr (cuda::std::is_floating_point_v<FromType>) {
col.data<Element::rep>()[i] = convert_floating_to_fixed<Element>(val, scale).value();
}
} else if constexpr (cuda::std::is_floating_point_v<Element> and is_fixed_point<FromType>()) {
col.data<Element>()[i] = convert_fixed_to_floating<Element>(val);
}
}
};
9 changes: 5 additions & 4 deletions cpp/src/quantiles/quantiles_util.hpp
@@ -16,6 +16,7 @@

#include <cudf/detail/utilities/assert.cuh>
#include <cudf/types.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>

@@ -46,8 +47,8 @@ CUDF_HOST_DEVICE inline Result linear(T lhs, T rhs, double frac)
// Underflow may occur when converting int64 to double
// detail: https://github.com/rapidsai/cudf/issues/1417

auto dlhs = static_cast<double>(lhs);
auto drhs = static_cast<double>(rhs);
auto dlhs = convert_to_floating<double>(lhs);
auto drhs = convert_to_floating<double>(rhs);
double one_minus_frac = 1.0 - frac;
return static_cast<Result>(one_minus_frac * dlhs + frac * drhs);
}
@@ -56,8 +57,8 @@ template <typename Result, typename T>
CUDF_HOST_DEVICE inline Result midpoint(T lhs, T rhs)
{
// TODO: try std::midpoint (C++20) if available
auto dlhs = static_cast<double>(lhs);
auto drhs = static_cast<double>(rhs);
auto dlhs = convert_to_floating<double>(lhs);
auto drhs = convert_to_floating<double>(rhs);
return static_cast<Result>(dlhs / 2 + drhs / 2);
}

14 changes: 8 additions & 6 deletions cpp/src/quantiles/tdigest/tdigest_aggregation.cu
@@ -28,6 +28,7 @@
#include <cudf/detail/tdigest/tdigest.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/span.hpp>

#include <rmm/device_uvector.hpp>
@@ -73,7 +74,7 @@ struct make_centroid {
centroid operator() __device__(size_type index) const
{
auto const is_valid = col.is_valid(index);
auto const mean = is_valid ? static_cast<double>(col.element<T>(index)) : 0.0;
auto const mean = is_valid ? convert_to_floating<double>(col.element<T>(index)) : 0.0;
auto const weight = is_valid ? 1.0 : 0.0;
return {mean, weight, is_valid};
}
@@ -87,7 +88,7 @@ struct make_centroid_no_nulls {

centroid operator() __device__(size_type index) const
{
return {static_cast<double>(col.element<T>(index)), 1.0, true};
return {convert_to_floating<double>(col.element<T>(index)), 1.0, true};
}
};

@@ -808,8 +809,9 @@ struct get_scalar_minmax_grouped {
auto const valid_count = group_valid_counts[group_index];
return valid_count > 0
? thrust::make_tuple(
static_cast<double>(col.element<T>(group_offsets[group_index])),
static_cast<double>(col.element<T>(group_offsets[group_index] + valid_count - 1)))
convert_to_floating<double>(col.element<T>(group_offsets[group_index])),
convert_to_floating<double>(
col.element<T>(group_offsets[group_index] + valid_count - 1)))
: thrust::make_tuple(0.0, 0.0);
}
};
@@ -823,8 +825,8 @@ struct get_scalar_minmax {
__device__ thrust::tuple<double, double> operator()(size_type)
{
return valid_count > 0
? thrust::make_tuple(static_cast<double>(col.element<T>(0)),
static_cast<double>(col.element<T>(valid_count - 1)))
? thrust::make_tuple(convert_to_floating<double>(col.element<T>(0)),
convert_to_floating<double>(col.element<T>(valid_count - 1)))
: thrust::make_tuple(0.0, 0.0);
}
};
16 changes: 12 additions & 4 deletions cpp/src/unary/cast_ops.cu
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
* Copyright (c) 2019-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -116,8 +116,12 @@ struct fixed_point_unary_cast {
std::enable_if_t<(cudf::is_fixed_point<_SourceT>() && cudf::is_numeric<TargetT>())>* = nullptr>
__device__ inline TargetT operator()(DeviceT const element)
{
auto const fp = SourceT{numeric::scaled_integer<DeviceT>{element, scale}};
return static_cast<TargetT>(fp);
auto const fixed_point = SourceT{numeric::scaled_integer<DeviceT>{element, scale}};
if constexpr (cuda::std::is_floating_point_v<TargetT>) {
return convert_fixed_to_floating<TargetT>(fixed_point);
} else {
return static_cast<TargetT>(fixed_point);
}
}

template <
Expand All @@ -126,7 +130,11 @@ struct fixed_point_unary_cast {
std::enable_if_t<(cudf::is_numeric<_SourceT>() && cudf::is_fixed_point<TargetT>())>* = nullptr>
__device__ inline DeviceT operator()(SourceT const element)
{
return TargetT{element, scale}.value();
if constexpr (cuda::std::is_floating_point_v<SourceT>) {
return convert_floating_to_fixed<TargetT>(element, scale).value();
} else {
return TargetT{element, scale}.value();
}
}
};

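For users of the column APIs: the column-level cast above keeps working and now routes through the explicit helpers internally; only raw fixed_point <-> floating-point conversions in C++ code need the new functions. A sketch, assuming the input is a DECIMAL64 column (the function name is illustrative):

#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/types.hpp>
#include <cudf/unary.hpp>

#include <memory>

std::unique_ptr<cudf::column> decimal_to_double(cudf::column_view const& decimals)
{
  // cudf::cast dispatches to fixed_point_unary_cast, which now calls
  // convert_fixed_to_floating for floating-point targets.
  return cudf::cast(decimals, cudf::data_type{cudf::type_id::FLOAT64});
}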