From 888e9d5c38cb27402313681744b87462846bc405 Mon Sep 17 00:00:00 2001 From: Paul Mattione <156858817+pmattione-nvidia@users.noreply.github.com> Date: Wed, 10 Apr 2024 17:56:10 -0400 Subject: [PATCH] Floating <--> fixed-point conversion must now be called explicitly (#15438) This change makes it so fixed_point objects can no longer be constructed with floating point values, and can no longer be cast to floating point values. Instead the functions added to unary.hpp must be explicitly called. In addition to making it more clear when and where these conversions are occurring, this also makes it so that the low-level fixed_point.hpp header won't be inundated with all of the complex lossless conversion code to come. Authors: - Paul Mattione (https://github.com/pmattione-nvidia) Approvers: - Karthikeyan (https://github.com/karthikeyann) - Shruti Shivakumar (https://github.com/shrshi) - Mark Harris (https://github.com/harrism) URL: https://github.com/rapidsai/cudf/pull/15438 --- cpp/include/cudf/fixed_point/fixed_point.hpp | 49 +---- cpp/include/cudf/unary.hpp | 75 ++++++- cpp/include/cudf/utilities/traits.hpp | 7 +- cpp/src/binaryop/compiled/binary_ops.cuh | 19 +- cpp/src/quantiles/quantiles_util.hpp | 9 +- .../quantiles/tdigest/tdigest_aggregation.cu | 14 +- cpp/src/unary/cast_ops.cu | 16 +- cpp/tests/fixed_point/fixed_point_tests.cpp | 189 +++++++++--------- cpp/tests/io/orc_test.cpp | 2 +- 9 files changed, 219 insertions(+), 161 deletions(-) diff --git a/cpp/include/cudf/fixed_point/fixed_point.hpp b/cpp/include/cudf/fixed_point/fixed_point.hpp index 4445af6c5a8..e39d75757e8 100644 --- a/cpp/include/cudf/fixed_point/fixed_point.hpp +++ b/cpp/include/cudf/fixed_point/fixed_point.hpp @@ -67,18 +67,6 @@ constexpr inline auto is_supported_representation_type() cuda::std::is_same_v; } -/** - * @brief Returns `true` if the value type is supported for constructing a `fixed_point` - * - * @tparam T The construction 
value type - * @return `true` if the value type is supported to construct a `fixed_point` type - */ -template -constexpr inline auto is_supported_construction_value_type() -{ - return cuda::std::is_integral() || cuda::std::is_floating_point_v; -} - /** @} */ // end of group // Helper functions for `fixed_point` type @@ -222,23 +210,8 @@ class fixed_point { scale_type _scale; public: - using rep = Rep; ///< The representation type - - /** - * @brief Constructor that will perform shifting to store value appropriately (from floating point - * types) - * - * @tparam T The floating point type that you are constructing from - * @param value The value that will be constructed from - * @param scale The exponent that is applied to Rad to perform shifting - */ - template () && - is_supported_representation_type()>* = nullptr> - CUDF_HOST_DEVICE inline explicit fixed_point(T const& value, scale_type const& scale) - : _value{static_cast(detail::shift(value, scale))}, _scale{scale} - { - } + using rep = Rep; ///< The representation type + static constexpr auto rad = Rad; ///< The base /** * @brief Constructor that will perform shifting to store value appropriately (from integral @@ -249,7 +222,7 @@ class fixed_point { * @param scale The exponent that is applied to Rad to perform shifting */ template () && + typename cuda::std::enable_if_t && is_supported_representation_type()>* = nullptr> CUDF_HOST_DEVICE inline explicit fixed_point(T const& value, scale_type const& scale) // `value` is cast to `Rep` to avoid overflow in cases where @@ -275,8 +248,7 @@ class fixed_point { * @tparam T The value type being constructing from * @param value The value that will be constructed from */ - template ()>* = nullptr> + template >* = nullptr> CUDF_HOST_DEVICE inline fixed_point(T const& value) : _value{static_cast(value)}, _scale{scale_type{0}} { @@ -288,19 +260,6 @@ class fixed_point { */ CUDF_HOST_DEVICE inline fixed_point() : _scale{scale_type{0}} {} - /** - * @brief Explicit conversion 
operator for casting to floating point types - * - * @tparam U The floating point type that is being explicitly converted to - * @return The `fixed_point` number in base 10 (aka human readable format) - */ - template >* = nullptr> - explicit constexpr operator U() const - { - return detail::shift(static_cast(_value), scale_type{-_scale}); - } - /** * @brief Explicit conversion operator for casting to integral types * diff --git a/cpp/include/cudf/unary.hpp b/cpp/include/cudf/unary.hpp index 64e802d88dd..5ded22488c7 100644 --- a/cpp/include/cudf/unary.hpp +++ b/cpp/include/cudf/unary.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2023, NVIDIA CORPORATION. + * Copyright (c) 2018-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,8 +16,10 @@ #pragma once +#include #include #include +#include #include @@ -31,6 +33,77 @@ namespace cudf { * @brief Column APIs for unary ops */ +/** + * @brief Convert a floating-point value to fixed point + * + * @note This conversion was moved from fixed-point member functions to free functions. + * This is so that the complex conversion code is not included into many parts of the + * code base that don't need it, and so that it's more obvious to pinpoint where these + * conversions are occurring. 
+ * + * @tparam Fixed The fixed-point type to convert to + * @tparam Floating The floating-point type to convert from + * @param floating The floating-point value to convert + * @param scale The desired scale of the fixed-point value + * @return The converted fixed-point value + */ +template () && + cuda::std::is_floating_point_v>* = nullptr> +CUDF_HOST_DEVICE Fixed convert_floating_to_fixed(Floating floating, numeric::scale_type scale) +{ + using Rep = typename Fixed::rep; + auto const shifted = numeric::detail::shift(floating, scale); + numeric::scaled_integer scaled{static_cast(shifted), scale}; + return Fixed(scaled); +} + +/** + * @brief Convert a fixed-point value to floating point + * + * @note This conversion was moved from fixed-point member functions to free functions. + * This is so that the complex conversion code is not included into many parts of the + * code base that don't need it, and so that it's more obvious to pinpoint where these + * conversions are occurring. + * + * @tparam Floating The floating-point type to convert to + * @tparam Fixed The fixed-point type to convert from + * @param fixed The fixed-point value to convert + * @return The converted floating-point value + */ +template && + is_fixed_point()>* = nullptr> +CUDF_HOST_DEVICE Floating convert_fixed_to_floating(Fixed fixed) +{ + using Rep = typename Fixed::rep; + auto const casted = static_cast(fixed.value()); + auto const scale = numeric::scale_type{-fixed.scale()}; + return numeric::detail::shift(casted, scale); +} + +/** + * @brief Convert a value to floating point + * + * @tparam Floating The floating-point type to convert to + * @tparam Input The input type to convert from + * @param input The input value to convert + * @return The converted floating-point value + */ +template >* = nullptr> +CUDF_HOST_DEVICE Floating convert_to_floating(Input input) +{ + if constexpr (is_fixed_point()) { + return convert_fixed_to_floating(input); + } else { + return static_cast(input); + } +} + 
/** * @brief Types of unary operations that can be performed on data. */ diff --git a/cpp/include/cudf/utilities/traits.hpp b/cpp/include/cudf/utilities/traits.hpp index 2dda0740b96..d191e44228a 100644 --- a/cpp/include/cudf/utilities/traits.hpp +++ b/cpp/include/cudf/utilities/traits.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2023, NVIDIA CORPORATION. + * Copyright (c) 2019-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -397,7 +397,10 @@ template constexpr inline bool is_fixed_point() { return std::is_same_v || std::is_same_v || - std::is_same_v; + std::is_same_v || + std::is_same_v, T> || + std::is_same_v, T> || + std::is_same_v, T>; } /** diff --git a/cpp/src/binaryop/compiled/binary_ops.cuh b/cpp/src/binaryop/compiled/binary_ops.cuh index d605c877d3f..0bc144baa83 100644 --- a/cpp/src/binaryop/compiled/binary_ops.cuh +++ b/cpp/src/binaryop/compiled/binary_ops.cuh @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -69,13 +70,17 @@ struct typed_casted_writer { if constexpr (mutable_column_device_view::has_element_accessor() and std::is_constructible_v) { col.element(i) = static_cast(val); - } else if constexpr (is_fixed_point() and - (is_fixed_point() or - std::is_constructible_v)) { - if constexpr (is_fixed_point()) - col.data()[i] = val.rescaled(numeric::scale_type{col.type().scale()}).value(); - else - col.data()[i] = Element{val, numeric::scale_type{col.type().scale()}}.value(); + } else if constexpr (is_fixed_point()) { + auto const scale = numeric::scale_type{col.type().scale()}; + if constexpr (is_fixed_point()) { + col.data()[i] = val.rescaled(scale).value(); + } else if constexpr (cuda::std::is_constructible_v) { + col.data()[i] = Element{val, scale}.value(); + } else if constexpr (cuda::std::is_floating_point_v) { + col.data()[i] = convert_floating_to_fixed(val, scale).value(); + } + } else if constexpr 
(cuda::std::is_floating_point_v and is_fixed_point()) { + col.data()[i] = convert_fixed_to_floating(val); } } }; diff --git a/cpp/src/quantiles/quantiles_util.hpp b/cpp/src/quantiles/quantiles_util.hpp index 5efafdd0be6..47864c25c5f 100644 --- a/cpp/src/quantiles/quantiles_util.hpp +++ b/cpp/src/quantiles/quantiles_util.hpp @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -46,8 +47,8 @@ CUDF_HOST_DEVICE inline Result linear(T lhs, T rhs, double frac) // Underflow may occur when converting int64 to double // detail: https://github.com/rapidsai/cudf/issues/1417 - auto dlhs = static_cast(lhs); - auto drhs = static_cast(rhs); + auto dlhs = convert_to_floating(lhs); + auto drhs = convert_to_floating(rhs); double one_minus_frac = 1.0 - frac; return static_cast(one_minus_frac * dlhs + frac * drhs); } @@ -56,8 +57,8 @@ template CUDF_HOST_DEVICE inline Result midpoint(T lhs, T rhs) { // TODO: try std::midpoint (C++20) if available - auto dlhs = static_cast(lhs); - auto drhs = static_cast(rhs); + auto dlhs = convert_to_floating(lhs); + auto drhs = convert_to_floating(rhs); return static_cast(dlhs / 2 + drhs / 2); } diff --git a/cpp/src/quantiles/tdigest/tdigest_aggregation.cu b/cpp/src/quantiles/tdigest/tdigest_aggregation.cu index 56e1bfbe003..8544d9caa56 100644 --- a/cpp/src/quantiles/tdigest/tdigest_aggregation.cu +++ b/cpp/src/quantiles/tdigest/tdigest_aggregation.cu @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -73,7 +74,7 @@ struct make_centroid { centroid operator() __device__(size_type index) const { auto const is_valid = col.is_valid(index); - auto const mean = is_valid ? static_cast(col.element(index)) : 0.0; + auto const mean = is_valid ? convert_to_floating(col.element(index)) : 0.0; auto const weight = is_valid ? 
1.0 : 0.0; return {mean, weight, is_valid}; } @@ -87,7 +88,7 @@ struct make_centroid_no_nulls { centroid operator() __device__(size_type index) const { - return {static_cast(col.element(index)), 1.0, true}; + return {convert_to_floating(col.element(index)), 1.0, true}; } }; @@ -808,8 +809,9 @@ struct get_scalar_minmax_grouped { auto const valid_count = group_valid_counts[group_index]; return valid_count > 0 ? thrust::make_tuple( - static_cast(col.element(group_offsets[group_index])), - static_cast(col.element(group_offsets[group_index] + valid_count - 1))) + convert_to_floating(col.element(group_offsets[group_index])), + convert_to_floating( + col.element(group_offsets[group_index] + valid_count - 1))) : thrust::make_tuple(0.0, 0.0); } }; @@ -823,8 +825,8 @@ struct get_scalar_minmax { __device__ thrust::tuple operator()(size_type) { return valid_count > 0 - ? thrust::make_tuple(static_cast(col.element(0)), - static_cast(col.element(valid_count - 1))) + ? thrust::make_tuple(convert_to_floating(col.element(0)), + convert_to_floating(col.element(valid_count - 1))) : thrust::make_tuple(0.0, 0.0); } }; diff --git a/cpp/src/unary/cast_ops.cu b/cpp/src/unary/cast_ops.cu index 47a0cb393aa..b6c9b3caa20 100644 --- a/cpp/src/unary/cast_ops.cu +++ b/cpp/src/unary/cast_ops.cu @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2023, NVIDIA CORPORATION. + * Copyright (c) 2019-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -116,8 +116,12 @@ struct fixed_point_unary_cast { std::enable_if_t<(cudf::is_fixed_point<_SourceT>() && cudf::is_numeric())>* = nullptr> __device__ inline TargetT operator()(DeviceT const element) { - auto const fp = SourceT{numeric::scaled_integer{element, scale}}; - return static_cast(fp); + auto const fixed_point = SourceT{numeric::scaled_integer{element, scale}}; + if constexpr (cuda::std::is_floating_point_v) { + return convert_fixed_to_floating(fixed_point); + } else { + return static_cast(fixed_point); + } } template < @@ -126,7 +130,11 @@ struct fixed_point_unary_cast { std::enable_if_t<(cudf::is_numeric<_SourceT>() && cudf::is_fixed_point())>* = nullptr> __device__ inline DeviceT operator()(SourceT const element) { - return TargetT{element, scale}.value(); + if constexpr (cuda::std::is_floating_point_v) { + return convert_floating_to_fixed(element, scale).value(); + } else { + return TargetT{element, scale}.value(); + } } }; diff --git a/cpp/tests/fixed_point/fixed_point_tests.cpp b/cpp/tests/fixed_point/fixed_point_tests.cpp index 1c1680fcd6e..73de1fbaa68 100644 --- a/cpp/tests/fixed_point/fixed_point_tests.cpp +++ b/cpp/tests/fixed_point/fixed_point_tests.cpp @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -45,67 +46,71 @@ TYPED_TEST(FixedPointTestAllReps, SimpleDecimalXXConstruction) { using decimalXX = fixed_point; - decimalXX num0{1.234567, scale_type{0}}; - decimalXX num1{1.234567, scale_type{-1}}; - decimalXX num2{1.234567, scale_type{-2}}; - decimalXX num3{1.234567, scale_type{-3}}; - decimalXX num4{1.234567, scale_type{-4}}; - decimalXX num5{1.234567, scale_type{-5}}; - decimalXX num6{1.234567, scale_type{-6}}; - - EXPECT_EQ(1, static_cast(num0)); - EXPECT_EQ(1.2, static_cast(num1)); - EXPECT_EQ(1.23, static_cast(num2)); - EXPECT_EQ(1.234, static_cast(num3)); - EXPECT_EQ(1.2345, static_cast(num4)); - EXPECT_EQ(1.23456, static_cast(num5)); - EXPECT_EQ(1.234567, static_cast(num6)); + auto num0 = 
cudf::convert_floating_to_fixed(1.234567, scale_type(0)); + auto num1 = cudf::convert_floating_to_fixed(1.234567, scale_type(-1)); + auto num2 = cudf::convert_floating_to_fixed(1.234567, scale_type(-2)); + auto num3 = cudf::convert_floating_to_fixed(1.234567, scale_type(-3)); + auto num4 = cudf::convert_floating_to_fixed(1.234567, scale_type(-4)); + auto num5 = cudf::convert_floating_to_fixed(1.234567, scale_type(-5)); + auto num6 = cudf::convert_floating_to_fixed(1.234567, scale_type(-6)); + + EXPECT_EQ(1, cudf::convert_fixed_to_floating(num0)); + EXPECT_EQ(1.2, cudf::convert_fixed_to_floating(num1)); + EXPECT_EQ(1.23, cudf::convert_fixed_to_floating(num2)); + EXPECT_EQ(1.234, cudf::convert_fixed_to_floating(num3)); + EXPECT_EQ(1.2345, cudf::convert_fixed_to_floating(num4)); + EXPECT_EQ(1.23456, cudf::convert_fixed_to_floating(num5)); + EXPECT_EQ(1.234567, cudf::convert_fixed_to_floating(num6)); } TYPED_TEST(FixedPointTestAllReps, SimpleNegativeDecimalXXConstruction) { using decimalXX = fixed_point; - decimalXX num0{-1.234567, scale_type{0}}; - decimalXX num1{-1.234567, scale_type{-1}}; - decimalXX num2{-1.234567, scale_type{-2}}; - decimalXX num3{-1.234567, scale_type{-3}}; - decimalXX num4{-1.234567, scale_type{-4}}; - decimalXX num5{-1.234567, scale_type{-5}}; - decimalXX num6{-1.234567, scale_type{-6}}; - - EXPECT_EQ(-1, static_cast(num0)); - EXPECT_EQ(-1.2, static_cast(num1)); - EXPECT_EQ(-1.23, static_cast(num2)); - EXPECT_EQ(-1.234, static_cast(num3)); - EXPECT_EQ(-1.2345, static_cast(num4)); - EXPECT_EQ(-1.23456, static_cast(num5)); - EXPECT_EQ(-1.234567, static_cast(num6)); + auto num0 = cudf::convert_floating_to_fixed(-1.234567, scale_type(0)); + auto num1 = cudf::convert_floating_to_fixed(-1.234567, scale_type(-1)); + auto num2 = cudf::convert_floating_to_fixed(-1.234567, scale_type(-2)); + auto num3 = cudf::convert_floating_to_fixed(-1.234567, scale_type(-3)); + auto num4 = cudf::convert_floating_to_fixed(-1.234567, scale_type(-4)); + auto num5 = 
cudf::convert_floating_to_fixed(-1.234567, scale_type(-5)); + auto num6 = cudf::convert_floating_to_fixed(-1.234567, scale_type(-6)); + + EXPECT_EQ(-1, cudf::convert_fixed_to_floating(num0)); + EXPECT_EQ(-1.2, cudf::convert_fixed_to_floating(num1)); + EXPECT_EQ(-1.23, cudf::convert_fixed_to_floating(num2)); + EXPECT_EQ(-1.234, cudf::convert_fixed_to_floating(num3)); + EXPECT_EQ(-1.2345, cudf::convert_fixed_to_floating(num4)); + EXPECT_EQ(-1.23456, cudf::convert_fixed_to_floating(num5)); + EXPECT_EQ(-1.234567, cudf::convert_fixed_to_floating(num6)); } TYPED_TEST(FixedPointTestAllReps, PaddedDecimalXXConstruction) { using decimalXX = fixed_point; - decimalXX a{1.1, scale_type{-1}}; - decimalXX b{1.01, scale_type{-2}}; - decimalXX c{1.001, scale_type{-3}}; - decimalXX d{1.0001, scale_type{-4}}; - decimalXX e{1.00001, scale_type{-5}}; - decimalXX f{1.000001, scale_type{-6}}; - - decimalXX x{1.000123, scale_type{-8}}; - decimalXX y{0.000123, scale_type{-8}}; - - EXPECT_EQ(1.1, static_cast(a)); - EXPECT_EQ(1.01, static_cast(b)); - EXPECT_EQ(1, static_cast(c)); // intentional (inherited problem from floating point) - EXPECT_EQ(1.0001, static_cast(d)); - EXPECT_EQ(1.00001, static_cast(e)); - EXPECT_EQ(1, static_cast(f)); // intentional (inherited problem from floating point) - - EXPECT_TRUE(1.000123 - static_cast(x) < std::numeric_limits::epsilon()); - EXPECT_EQ(0.000123, static_cast(y)); + auto a = cudf::convert_floating_to_fixed(1.1, scale_type(-1)); + auto b = cudf::convert_floating_to_fixed(1.01, scale_type(-2)); + auto c = cudf::convert_floating_to_fixed(1.001, scale_type(-3)); + auto d = cudf::convert_floating_to_fixed(1.0001, scale_type(-4)); + auto e = cudf::convert_floating_to_fixed(1.00001, scale_type(-5)); + auto f = cudf::convert_floating_to_fixed(1.000001, scale_type(-6)); + auto x = cudf::convert_floating_to_fixed(1.000123, scale_type(-8)); + auto y = cudf::convert_floating_to_fixed(0.000123, scale_type(-8)); + + EXPECT_EQ(1.1, 
cudf::convert_fixed_to_floating(a)); + EXPECT_EQ(1.01, cudf::convert_fixed_to_floating(b)); + EXPECT_EQ(1, + cudf::convert_fixed_to_floating( + c)); // intentional (inherited problem from floating point) + EXPECT_EQ(1.0001, cudf::convert_fixed_to_floating(d)); + EXPECT_EQ(1.00001, cudf::convert_fixed_to_floating(e)); + EXPECT_EQ(1, + cudf::convert_fixed_to_floating( + f)); // intentional (inherited problem from floating point) + + EXPECT_TRUE(1.000123 - cudf::convert_fixed_to_floating(x) < + std::numeric_limits::epsilon()); + EXPECT_EQ(0.000123, cudf::convert_fixed_to_floating(y)); } TYPED_TEST(FixedPointTestAllReps, SimpleBinaryFPConstruction) @@ -118,34 +123,34 @@ TYPED_TEST(FixedPointTestAllReps, SimpleBinaryFPConstruction) binary_fp num3{10, scale_type{3}}; binary_fp num4{10, scale_type{4}}; - binary_fp num5{1.24, scale_type{0}}; - binary_fp num6{1.24, scale_type{-1}}; - binary_fp num7{1.32, scale_type{-2}}; - binary_fp num8{1.41, scale_type{-3}}; - binary_fp num9{1.45, scale_type{-4}}; - - EXPECT_EQ(10, static_cast(num0)); - EXPECT_EQ(10, static_cast(num1)); - EXPECT_EQ(8, static_cast(num2)); - EXPECT_EQ(8, static_cast(num3)); - EXPECT_EQ(0, static_cast(num4)); - - EXPECT_EQ(1, static_cast(num5)); - EXPECT_EQ(1, static_cast(num6)); - EXPECT_EQ(1.25, static_cast(num7)); - EXPECT_EQ(1.375, static_cast(num8)); - EXPECT_EQ(1.4375, static_cast(num9)); + auto num5 = cudf::convert_floating_to_fixed(1.24, scale_type(0)); + auto num6 = cudf::convert_floating_to_fixed(1.24, scale_type(-1)); + auto num7 = cudf::convert_floating_to_fixed(1.32, scale_type(-2)); + auto num8 = cudf::convert_floating_to_fixed(1.41, scale_type(-3)); + auto num9 = cudf::convert_floating_to_fixed(1.45, scale_type(-4)); + + EXPECT_EQ(10, cudf::convert_fixed_to_floating(num0)); + EXPECT_EQ(10, cudf::convert_fixed_to_floating(num1)); + EXPECT_EQ(8, cudf::convert_fixed_to_floating(num2)); + EXPECT_EQ(8, cudf::convert_fixed_to_floating(num3)); + EXPECT_EQ(0, cudf::convert_fixed_to_floating(num4)); + 
+ EXPECT_EQ(1, cudf::convert_fixed_to_floating(num5)); + EXPECT_EQ(1, cudf::convert_fixed_to_floating(num6)); + EXPECT_EQ(1.25, cudf::convert_fixed_to_floating(num7)); + EXPECT_EQ(1.375, cudf::convert_fixed_to_floating(num8)); + EXPECT_EQ(1.4375, cudf::convert_fixed_to_floating(num9)); } TYPED_TEST(FixedPointTestAllReps, MoreSimpleBinaryFPConstruction) { using binary_fp = fixed_point; - binary_fp num0{1.25, scale_type{-2}}; - binary_fp num1{2.1, scale_type{-4}}; + auto num0 = cudf::convert_floating_to_fixed(1.25, scale_type(-2)); + auto num1 = cudf::convert_floating_to_fixed(2.1, scale_type(-4)); - EXPECT_EQ(1.25, static_cast(num0)); - EXPECT_EQ(2.0625, static_cast(num1)); + EXPECT_EQ(1.25, cudf::convert_fixed_to_floating(num0)); + EXPECT_EQ(2.0625, cudf::convert_fixed_to_floating(num1)); } TYPED_TEST(FixedPointTestAllReps, SimpleDecimalXXMath) @@ -166,7 +171,7 @@ TYPED_TEST(FixedPointTestAllReps, SimpleDecimalXXMath) EXPECT_EQ(TWO / ONE, TWO); EXPECT_EQ(SIX / TWO, THREE); - decimalXX a{1.23, scale_type{-2}}; + auto a = cudf::convert_floating_to_fixed(1.23, scale_type(-2)); decimalXX b{0, scale_type{0}}; EXPECT_EQ(a + b, a); @@ -211,8 +216,8 @@ TYPED_TEST(FixedPointTestAllReps, DecimalXXTrickyDivision) EXPECT_EQ(SIXTY_1 / TEN_0, ONE_1); EXPECT_EQ(SIXTY_1 / TEN_1, SIX_0); - decimalXX A{34.56, scale_type{-2}}; - decimalXX B{1.234, scale_type{-3}}; + auto A = cudf::convert_floating_to_fixed(34.56, scale_type(-2)); + auto B = cudf::convert_floating_to_fixed(1.234, scale_type(-3)); decimalXX C{1, scale_type{-2}}; EXPECT_EQ(static_cast(A / B), 20); @@ -255,17 +260,17 @@ TYPED_TEST(FixedPointTestAllReps, ArithmeticWithDifferentScales) using decimalXX = fixed_point; decimalXX a{1, scale_type{0}}; - decimalXX b{1.2, scale_type{-1}}; - decimalXX c{1.23, scale_type{-2}}; - decimalXX d{1.111, scale_type{-3}}; + auto b = cudf::convert_floating_to_fixed(1.2, scale_type(-1)); + auto c = cudf::convert_floating_to_fixed(1.23, scale_type(-2)); + auto d = 
cudf::convert_floating_to_fixed(1.111, scale_type(-3)); - decimalXX x{2.2, scale_type{-1}}; - decimalXX y{3.43, scale_type{-2}}; - decimalXX z{4.541, scale_type{-3}}; + auto x = cudf::convert_floating_to_fixed(2.2, scale_type(-1)); + auto y = cudf::convert_floating_to_fixed(3.43, scale_type(-2)); + auto z = cudf::convert_floating_to_fixed(4.541, scale_type(-3)); - decimalXX xx{0.2, scale_type{-1}}; - decimalXX yy{0.03, scale_type{-2}}; - decimalXX zz{0.119, scale_type{-3}}; + auto xx = cudf::convert_floating_to_fixed(0.2, scale_type(-1)); + auto yy = cudf::convert_floating_to_fixed(0.03, scale_type(-2)); + auto zz = cudf::convert_floating_to_fixed(0.119, scale_type(-3)); EXPECT_EQ(a + b, x); EXPECT_EQ(a + b + c, y); @@ -280,12 +285,12 @@ TYPED_TEST(FixedPointTestAllReps, RescaledTest) using decimalXX = fixed_point; decimalXX num0{1, scale_type{0}}; - decimalXX num1{1.2, scale_type{-1}}; - decimalXX num2{1.23, scale_type{-2}}; - decimalXX num3{1.234, scale_type{-3}}; - decimalXX num4{1.2345, scale_type{-4}}; - decimalXX num5{1.23456, scale_type{-5}}; - decimalXX num6{1.234567, scale_type{-6}}; + auto num1 = cudf::convert_floating_to_fixed(1.2, scale_type(-1)); + auto num2 = cudf::convert_floating_to_fixed(1.23, scale_type(-2)); + auto num3 = cudf::convert_floating_to_fixed(1.234, scale_type(-3)); + auto num4 = cudf::convert_floating_to_fixed(1.2345, scale_type(-4)); + auto num5 = cudf::convert_floating_to_fixed(1.23456, scale_type(-5)); + auto num6 = cudf::convert_floating_to_fixed(1.234567, scale_type(-6)); EXPECT_EQ(num0, num6.rescaled(scale_type{0})); EXPECT_EQ(num1, num6.rescaled(scale_type{-1})); @@ -314,7 +319,7 @@ TYPED_TEST(FixedPointTestAllReps, BoolConversion) { using decimalXX = fixed_point; - decimalXX truthy_value{1.234567, scale_type{0}}; + auto truthy_value = cudf::convert_floating_to_fixed(1.234567, scale_type(0)); decimalXX falsy_value{0, scale_type{0}}; // Test explicit conversions @@ -442,12 +447,14 @@ void float_vector_test(ValueType const 
initial_value, std::vector vec1(size); std::vector vec2(size); - std::iota(std::begin(vec1), std::end(vec1), decimal32{initial_value, scale_type{scale}}); + auto decimal_input = cudf::convert_floating_to_fixed(initial_value, scale_type{scale}); + std::iota(std::begin(vec1), std::end(vec1), decimal_input); std::iota(std::begin(vec2), std::end(vec2), initial_value); auto equal = std::equal( std::cbegin(vec1), std::cend(vec1), std::cbegin(vec2), [](auto const& a, auto const& b) { - return static_cast(a) - b <= std::numeric_limits::epsilon(); + return cudf::convert_fixed_to_floating(a) - b <= + std::numeric_limits::epsilon(); }); EXPECT_TRUE(equal); diff --git a/cpp/tests/io/orc_test.cpp b/cpp/tests/io/orc_test.cpp index e108e68e1f9..a544a812efb 100644 --- a/cpp/tests/io/orc_test.cpp +++ b/cpp/tests/io/orc_test.cpp @@ -548,7 +548,7 @@ TEST_F(OrcWriterTest, SlicedTable) int32_col col0(seq_col0.begin(), seq_col0.end()); str_col col1(strings.begin(), strings.end()); float32_col col2(seq_col2.begin(), seq_col2.end()); - float32_col col3(seq_col3, seq_col3 + num_rows); + dec64_col col3(seq_col3, seq_col3 + num_rows); list_col col4{ {9, 8}, {7, 6, 5}, {}, {4}, {3, 2, 1, 0}, {20, 21, 22, 23, 24}, {}, {66, 666}};