Floating <--> fixed-point conversion must now be called explicitly (#15438)

This change makes it so that fixed_point objects can no longer be constructed from floating-point values and can no longer be cast to floating-point values. Instead, the conversion functions added to unary.hpp must be called explicitly.

Besides making it clearer when and where these conversions occur, this keeps the low-level fixed_point.hpp header from being inundated with the complex lossless-conversion code to come.
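
As a minimal sketch of the new call pattern (illustrative values and scale; not taken from this commit), code that previously relied on the floating-point constructor and the explicit cast now calls the free functions from cudf/unary.hpp:

#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/unary.hpp>

void example()
{
  auto const scale = numeric::scale_type{-2};

  // Previously: numeric::decimal64 d{1.23, scale}; and static_cast<double>(d);
  // Now the conversions are spelled out through the free functions:
  auto const d = cudf::convert_floating_to_fixed<numeric::decimal64>(1.23, scale);
  auto const f = cudf::convert_fixed_to_floating<double>(d);

  // convert_to_floating accepts fixed-point and arithmetic inputs alike, forwarding
  // the former to convert_fixed_to_floating and static_casting everything else.
  auto const g = cudf::convert_to_floating<double>(d);
  (void)f;
  (void)g;
}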

Authors:
  - Paul Mattione (https://github.com/pmattione-nvidia)

Approvers:
  - Karthikeyan (https://github.com/karthikeyann)
  - Shruti Shivakumar (https://github.com/shrshi)
  - Mark Harris (https://github.com/harrism)

URL: #15438
pmattione-nvidia authored Apr 10, 2024
1 parent 460b41e commit 888e9d5
Showing 9 changed files with 219 additions and 161 deletions.
49 changes: 4 additions & 45 deletions cpp/include/cudf/fixed_point/fixed_point.hpp
@@ -67,18 +67,6 @@ constexpr inline auto is_supported_representation_type()
cuda::std::is_same_v<T, __int128_t>;
}

/**
* @brief Returns `true` if the value type is supported for constructing a `fixed_point`
*
* @tparam T The construction value type
* @return `true` if the value type is supported to construct a `fixed_point` type
*/
template <typename T>
constexpr inline auto is_supported_construction_value_type()
{
return cuda::std::is_integral<T>() || cuda::std::is_floating_point_v<T>;
}

/** @} */ // end of group

// Helper functions for `fixed_point` type
@@ -222,23 +210,8 @@ class fixed_point {
scale_type _scale;

public:
using rep = Rep; ///< The representation type

/**
* @brief Constructor that will perform shifting to store value appropriately (from floating point
* types)
*
* @tparam T The floating point type that you are constructing from
* @param value The value that will be constructed from
* @param scale The exponent that is applied to Rad to perform shifting
*/
template <typename T,
typename cuda::std::enable_if_t<cuda::std::is_floating_point<T>() &&
is_supported_representation_type<Rep>()>* = nullptr>
CUDF_HOST_DEVICE inline explicit fixed_point(T const& value, scale_type const& scale)
: _value{static_cast<Rep>(detail::shift<Rep, Rad>(value, scale))}, _scale{scale}
{
}
using rep = Rep; ///< The representation type
static constexpr auto rad = Rad; ///< The base

/**
* @brief Constructor that will perform shifting to store value appropriately (from integral
@@ -249,7 +222,7 @@
* @param scale The exponent that is applied to Rad to perform shifting
*/
template <typename T,
typename cuda::std::enable_if_t<cuda::std::is_integral<T>() &&
typename cuda::std::enable_if_t<cuda::std::is_integral_v<T> &&
is_supported_representation_type<Rep>()>* = nullptr>
CUDF_HOST_DEVICE inline explicit fixed_point(T const& value, scale_type const& scale)
// `value` is cast to `Rep` to avoid overflow in cases where
@@ -275,8 +248,7 @@
* @tparam T The value type being constructing from
* @param value The value that will be constructed from
*/
template <typename T,
typename cuda::std::enable_if_t<is_supported_construction_value_type<T>()>* = nullptr>
template <typename T, typename cuda::std::enable_if_t<cuda::std::is_integral_v<T>>* = nullptr>
CUDF_HOST_DEVICE inline fixed_point(T const& value)
: _value{static_cast<Rep>(value)}, _scale{scale_type{0}}
{
@@ -288,19 +260,6 @@
*/
CUDF_HOST_DEVICE inline fixed_point() : _scale{scale_type{0}} {}

/**
* @brief Explicit conversion operator for casting to floating point types
*
* @tparam U The floating point type that is being explicitly converted to
* @return The `fixed_point` number in base 10 (aka human readable format)
*/
template <typename U,
typename cuda::std::enable_if_t<cuda::std::is_floating_point_v<U>>* = nullptr>
explicit constexpr operator U() const
{
return detail::shift<Rep, Rad>(static_cast<U>(_value), scale_type{-_scale});
}

/**
* @brief Explicit conversion operator for casting to integral types
*
75 changes: 74 additions & 1 deletion cpp/include/cudf/unary.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2023, NVIDIA CORPORATION.
* Copyright (c) 2018-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,8 +16,10 @@

#pragma once

#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/types.hpp>
#include <cudf/utilities/default_stream.hpp>
#include <cudf/utilities/traits.hpp>

#include <rmm/mr/device/per_device_resource.hpp>

@@ -31,6 +33,77 @@ namespace cudf {
* @brief Column APIs for unary ops
*/

/**
* @brief Convert a floating-point value to fixed point
*
* @note This conversion was moved from fixed-point member functions to free functions.
* This is so that the complex conversion code is not included into many parts of the
* code base that don't need it, and so that it's more obvious to pinpoint where these
* conversions are occurring.
*
* @tparam Fixed The fixed-point type to convert to
* @tparam Floating The floating-point type to convert from
* @param floating The floating-point value to convert
* @param scale The desired scale of the fixed-point value
* @return The converted fixed-point value
*/
template <typename Fixed,
typename Floating,
typename cuda::std::enable_if_t<is_fixed_point<Fixed>() &&
cuda::std::is_floating_point_v<Floating>>* = nullptr>
CUDF_HOST_DEVICE Fixed convert_floating_to_fixed(Floating floating, numeric::scale_type scale)
{
using Rep = typename Fixed::rep;
auto const shifted = numeric::detail::shift<Rep, Fixed::rad>(floating, scale);
numeric::scaled_integer<Rep> scaled{static_cast<Rep>(shifted), scale};
return Fixed(scaled);
}

/**
* @brief Convert a fixed-point value to floating point
*
* @note This conversion was moved from fixed-point member functions to free functions.
* This is so that the complex conversion code is not included into many parts of the
* code base that don't need it, and so that it's more obvious to pinpoint where these
* conversions are occurring.
*
* @tparam Floating The floating-point type to convert to
* @tparam Fixed The fixed-point type to convert from
* @param fixed The fixed-point value to convert
* @return The converted floating-point value
*/
template <typename Floating,
typename Fixed,
typename cuda::std::enable_if_t<cuda::std::is_floating_point_v<Floating> &&
is_fixed_point<Fixed>()>* = nullptr>
CUDF_HOST_DEVICE Floating convert_fixed_to_floating(Fixed fixed)
{
using Rep = typename Fixed::rep;
auto const casted = static_cast<Floating>(fixed.value());
auto const scale = numeric::scale_type{-fixed.scale()};
return numeric::detail::shift<Rep, Fixed::rad>(casted, scale);
}

/**
* @brief Convert a value to floating point
*
* @tparam Floating The floating-point type to convert to
* @tparam Input The input type to convert from
* @param input The input value to convert
* @return The converted floating-point value
*/
template <typename Floating,
typename Input,
typename cuda::std::enable_if_t<cuda::std::is_floating_point_v<Floating>>* = nullptr>
CUDF_HOST_DEVICE Floating convert_to_floating(Input input)
{
if constexpr (is_fixed_point<Input>()) {
return convert_fixed_to_floating<Floating>(input);
} else {
return static_cast<Floating>(input);
}
}

/**
* @brief Types of unary operations that can be performed on data.
*/
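For generic element code that can see either arithmetic or fixed-point values — the pattern used in the quantiles changes below — convert_to_floating gives a single call site. A small sketch (the as_double helper is hypothetical, not part of this commit):

#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/unary.hpp>

// Hypothetical helper: one conversion call regardless of the element type.
template <typename Element>
double as_double(Element element)
{
  // Fixed-point inputs route through convert_fixed_to_floating; others are static_cast.
  return cudf::convert_to_floating<double>(element);
}

// as_double(int64_t{7});
// as_double(numeric::decimal64{numeric::scaled_integer<int64_t>{123, numeric::scale_type{-2}}});
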
7 changes: 5 additions & 2 deletions cpp/include/cudf/utilities/traits.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
* Copyright (c) 2019-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -397,7 +397,10 @@ template <typename T>
constexpr inline bool is_fixed_point()
{
return std::is_same_v<numeric::decimal32, T> || std::is_same_v<numeric::decimal64, T> ||
std::is_same_v<numeric::decimal128, T>;
std::is_same_v<numeric::decimal128, T> ||
std::is_same_v<numeric::fixed_point<int32_t, numeric::Radix::BASE_2>, T> ||
std::is_same_v<numeric::fixed_point<int64_t, numeric::Radix::BASE_2>, T> ||
std::is_same_v<numeric::fixed_point<__int128_t, numeric::Radix::BASE_2>, T>;
}

/**
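A quick compile-time check of the broadened trait (a sketch, not part of the commit): base-2 instantiations now satisfy is_fixed_point, so they are accepted by the new conversion templates in unary.hpp.

#include <cudf/fixed_point/fixed_point.hpp>
#include <cudf/utilities/traits.hpp>

static_assert(cudf::is_fixed_point<numeric::decimal64>());
static_assert(cudf::is_fixed_point<numeric::fixed_point<int64_t, numeric::Radix::BASE_2>>());
static_assert(!cudf::is_fixed_point<double>());
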
19 changes: 12 additions & 7 deletions cpp/src/binaryop/compiled/binary_ops.cuh
@@ -22,6 +22,7 @@
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_view.hpp>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/unary.hpp>

#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>
@@ -69,13 +70,17 @@ struct typed_casted_writer {
if constexpr (mutable_column_device_view::has_element_accessor<Element>() and
std::is_constructible_v<Element, FromType>) {
col.element<Element>(i) = static_cast<Element>(val);
} else if constexpr (is_fixed_point<Element>() and
(is_fixed_point<FromType>() or
std::is_constructible_v<Element, FromType>)) {
if constexpr (is_fixed_point<FromType>())
col.data<Element::rep>()[i] = val.rescaled(numeric::scale_type{col.type().scale()}).value();
else
col.data<Element::rep>()[i] = Element{val, numeric::scale_type{col.type().scale()}}.value();
} else if constexpr (is_fixed_point<Element>()) {
auto const scale = numeric::scale_type{col.type().scale()};
if constexpr (is_fixed_point<FromType>()) {
col.data<Element::rep>()[i] = val.rescaled(scale).value();
} else if constexpr (cuda::std::is_constructible_v<Element, FromType>) {
col.data<Element::rep>()[i] = Element{val, scale}.value();
} else if constexpr (cuda::std::is_floating_point_v<FromType>) {
col.data<Element::rep>()[i] = convert_floating_to_fixed<Element>(val, scale).value();
}
} else if constexpr (cuda::std::is_floating_point_v<Element> and is_fixed_point<FromType>()) {
col.data<Element>()[i] = convert_fixed_to_floating<Element>(val);
}
}
};
9 changes: 5 additions & 4 deletions cpp/src/quantiles/quantiles_util.hpp
@@ -16,6 +16,7 @@

#include <cudf/detail/utilities/assert.cuh>
#include <cudf/types.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>

@@ -46,8 +47,8 @@ CUDF_HOST_DEVICE inline Result linear(T lhs, T rhs, double frac)
// Underflow may occur when converting int64 to double
// detail: https://github.com/rapidsai/cudf/issues/1417

auto dlhs = static_cast<double>(lhs);
auto drhs = static_cast<double>(rhs);
auto dlhs = convert_to_floating<double>(lhs);
auto drhs = convert_to_floating<double>(rhs);
double one_minus_frac = 1.0 - frac;
return static_cast<Result>(one_minus_frac * dlhs + frac * drhs);
}
@@ -56,8 +57,8 @@ template <typename Result, typename T>
CUDF_HOST_DEVICE inline Result midpoint(T lhs, T rhs)
{
// TODO: try std::midpoint (C++20) if available
auto dlhs = static_cast<double>(lhs);
auto drhs = static_cast<double>(rhs);
auto dlhs = convert_to_floating<double>(lhs);
auto drhs = convert_to_floating<double>(rhs);
return static_cast<Result>(dlhs / 2 + drhs / 2);
}

14 changes: 8 additions & 6 deletions cpp/src/quantiles/tdigest/tdigest_aggregation.cu
@@ -28,6 +28,7 @@
#include <cudf/detail/tdigest/tdigest.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/lists/lists_column_view.hpp>
#include <cudf/unary.hpp>
#include <cudf/utilities/span.hpp>

#include <rmm/device_uvector.hpp>
@@ -73,7 +74,7 @@ struct make_centroid {
centroid operator() __device__(size_type index) const
{
auto const is_valid = col.is_valid(index);
auto const mean = is_valid ? static_cast<double>(col.element<T>(index)) : 0.0;
auto const mean = is_valid ? convert_to_floating<double>(col.element<T>(index)) : 0.0;
auto const weight = is_valid ? 1.0 : 0.0;
return {mean, weight, is_valid};
}
@@ -87,7 +88,7 @@ struct make_centroid_no_nulls {

centroid operator() __device__(size_type index) const
{
return {static_cast<double>(col.element<T>(index)), 1.0, true};
return {convert_to_floating<double>(col.element<T>(index)), 1.0, true};
}
};

@@ -808,8 +809,9 @@ struct get_scalar_minmax_grouped {
auto const valid_count = group_valid_counts[group_index];
return valid_count > 0
? thrust::make_tuple(
static_cast<double>(col.element<T>(group_offsets[group_index])),
static_cast<double>(col.element<T>(group_offsets[group_index] + valid_count - 1)))
convert_to_floating<double>(col.element<T>(group_offsets[group_index])),
convert_to_floating<double>(
col.element<T>(group_offsets[group_index] + valid_count - 1)))
: thrust::make_tuple(0.0, 0.0);
}
};
@@ -823,8 +825,8 @@ struct get_scalar_minmax {
__device__ thrust::tuple<double, double> operator()(size_type)
{
return valid_count > 0
? thrust::make_tuple(static_cast<double>(col.element<T>(0)),
static_cast<double>(col.element<T>(valid_count - 1)))
? thrust::make_tuple(convert_to_floating<double>(col.element<T>(0)),
convert_to_floating<double>(col.element<T>(valid_count - 1)))
: thrust::make_tuple(0.0, 0.0);
}
};
16 changes: 12 additions & 4 deletions cpp/src/unary/cast_ops.cu
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2023, NVIDIA CORPORATION.
* Copyright (c) 2019-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -116,8 +116,12 @@ struct fixed_point_unary_cast {
std::enable_if_t<(cudf::is_fixed_point<_SourceT>() && cudf::is_numeric<TargetT>())>* = nullptr>
__device__ inline TargetT operator()(DeviceT const element)
{
auto const fp = SourceT{numeric::scaled_integer<DeviceT>{element, scale}};
return static_cast<TargetT>(fp);
auto const fixed_point = SourceT{numeric::scaled_integer<DeviceT>{element, scale}};
if constexpr (cuda::std::is_floating_point_v<TargetT>) {
return convert_fixed_to_floating<TargetT>(fixed_point);
} else {
return static_cast<TargetT>(fixed_point);
}
}

template <
Expand All @@ -126,7 +130,11 @@ struct fixed_point_unary_cast {
std::enable_if_t<(cudf::is_numeric<_SourceT>() && cudf::is_fixed_point<TargetT>())>* = nullptr>
__device__ inline DeviceT operator()(SourceT const element)
{
return TargetT{element, scale}.value();
if constexpr (cuda::std::is_floating_point_v<SourceT>) {
return convert_floating_to_fixed<TargetT>(element, scale).value();
} else {
return TargetT{element, scale}.value();
}
}
};
