Make device_memory_resource::do_get_mem_info() and supports_get_mem_info() not pure virtual. Remove derived implementations and calls in RMM #1430

Merged · 4 commits · Jan 23, 2024
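With `do_get_mem_info()` and `supports_get_mem_info()` no longer pure virtual, a resource derived from `device_memory_resource` only has to implement allocation and deallocation. A minimal sketch of what such a resource looks like after this change (the class is illustrative, not part of this diff; it assumes RMM's error macros from `rmm/detail/error.hpp` and keeps the `supports_streams()` override, which is still required at this point):

```cpp
#include <rmm/cuda_stream_view.hpp>
#include <rmm/detail/error.hpp>
#include <rmm/mr/device/device_memory_resource.hpp>

#include <cuda_runtime_api.h>

// Illustrative only: do_get_mem_info() and supports_get_mem_info() are not
// overridden; they fall back to the new base-class defaults ({0, 0} and false).
class minimal_resource final : public rmm::mr::device_memory_resource {
 public:
  [[nodiscard]] bool supports_streams() const noexcept override { return false; }

 private:
  void* do_allocate(std::size_t bytes, rmm::cuda_stream_view) override
  {
    void* ptr{nullptr};
    RMM_CUDA_TRY(cudaMalloc(&ptr, bytes));  // synchronous allocation, for simplicity
    return ptr;
  }

  void do_deallocate(void* ptr, std::size_t, rmm::cuda_stream_view) override
  {
    RMM_ASSERT_CUDA_SUCCESS(cudaFree(ptr));
  }
};
```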
21 changes: 1 addition & 20 deletions benchmarks/utilities/simulated_memory_resource.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
* Copyright (c) 2020-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -59,13 +59,6 @@ class simulated_memory_resource final : public device_memory_resource {
*/
[[nodiscard]] bool supports_streams() const noexcept override { return false; }

/**
* @brief Query whether the resource supports the get_mem_info API.
*
* @return false
*/
[[nodiscard]] bool supports_get_mem_info() const noexcept override { return false; }

private:
/**
* @brief Allocates memory of size at least `bytes`.
@@ -95,18 +88,6 @@ class simulated_memory_resource final : public device_memory_resource {
*/
void do_deallocate(void* ptr, std::size_t, cuda_stream_view) override {}

/**
* @brief Get free and available memory for memory resource.
*
* @param stream to execute on.
* @return std::pair containing free_size and total_size of memory.
*/
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
cuda_stream_view stream) const override
{
return std::make_pair(0, 0);
}

char* begin_{};
char* end_{};
};
26 changes: 0 additions & 26 deletions include/rmm/mr/device/aligned_resource_adaptor.hpp
@@ -96,16 +96,6 @@ class aligned_resource_adaptor final : public device_memory_resource {
return upstream_->supports_streams();
}

/**
* @brief Query whether the resource supports the get_mem_info API.
*
* @return bool true if the upstream resource supports get_mem_info, false otherwise.
*/
[[nodiscard]] bool supports_get_mem_info() const noexcept override
{
return upstream_->supports_get_mem_info();
}

/**
* @brief The default alignment used by the adaptor.
*/
@@ -183,22 +173,6 @@ class aligned_resource_adaptor final : public device_memory_resource {
alignment_ == cast->alignment_ && alignment_threshold_ == cast->alignment_threshold_;
}

/**
* @brief Get free and available memory from upstream resource.
*
* The free size may not be fully allocatable because of alignment requirements.
*
* @throws rmm::cuda_error if unable to retrieve memory info.
*
* @param stream Stream on which to get the mem info.
* @return std::pair containing free_size and total_size of memory
*/
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
cuda_stream_view stream) const override
{
return upstream_->get_mem_info(stream);
}

/**
* @brief Calculate the allocation size needed from upstream to account for alignments of both the
* size and the base pointer.
19 changes: 0 additions & 19 deletions include/rmm/mr/device/arena_memory_resource.hpp
@@ -118,13 +118,6 @@ class arena_memory_resource final : public device_memory_resource {
*/
bool supports_streams() const noexcept override { return true; }

/**
* @brief Query whether the resource supports the get_mem_info API.
*
* @return bool false.
*/
bool supports_get_mem_info() const noexcept override { return false; }

private:
using global_arena = rmm::mr::detail::arena::global_arena<Upstream>;
using arena = rmm::mr::detail::arena::arena<Upstream>;
@@ -312,18 +305,6 @@ class arena_memory_resource final : public device_memory_resource {
}
}

/**
* @brief Get free and available memory for memory resource.
*
* @param stream to execute on.
* @return std::pair containing free_size and total_size of memory.
*/
std::pair<std::size_t, std::size_t> do_get_mem_info(
[[maybe_unused]] cuda_stream_view stream) const override
{
return std::make_pair(0, 0);
}

/**
* Dump memory to log.
*
21 changes: 0 additions & 21 deletions include/rmm/mr/device/binning_memory_resource.hpp
@@ -106,13 +106,6 @@ class binning_memory_resource final : public device_memory_resource {
*/
[[nodiscard]] bool supports_streams() const noexcept override { return true; }

/**
* @brief Query whether the resource supports the get_mem_info API.
*
* @return false
*/
[[nodiscard]] bool supports_get_mem_info() const noexcept override { return false; }

/**
* @brief Get the upstream memory_resource object.
*
@@ -197,20 +190,6 @@ class binning_memory_resource final : public device_memory_resource {
if (res != nullptr) { res->deallocate(ptr, bytes, stream); }
}

/**
* @brief Get free and available memory for memory resource
*
* @throws std::runtime_error if we could not get free / total memory
*
* @param stream the stream being executed on
* @return std::pair with available and free memory for resource
*/
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
[[maybe_unused]] cuda_stream_view stream) const override
{
return std::make_pair(0, 0);
}

Upstream* upstream_mr_; // The upstream memory_resource from which to allocate blocks.

std::vector<std::unique_ptr<fixed_size_memory_resource<Upstream>>> owned_bin_resources_;
8 changes: 1 addition & 7 deletions include/rmm/mr/device/callback_memory_resource.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
* Copyright (c) 2022-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -138,13 +138,7 @@ class callback_memory_resource final : public device_memory_resource {
deallocate_callback_(ptr, bytes, stream, deallocate_callback_arg_);
}

[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(cuda_stream_view) const override
{
throw std::runtime_error("cannot get free / total memory");
}

[[nodiscard]] bool supports_streams() const noexcept override { return false; }
[[nodiscard]] bool supports_get_mem_info() const noexcept override { return false; }

allocate_callback_t allocate_callback_;
deallocate_callback_t deallocate_callback_;
20 changes: 0 additions & 20 deletions include/rmm/mr/device/cuda_async_memory_resource.hpp
@@ -164,13 +164,6 @@ class cuda_async_memory_resource final : public device_memory_resource {
*/
[[nodiscard]] bool supports_streams() const noexcept override { return true; }

/**
* @brief Query whether the resource supports the get_mem_info API.
*
* @return false
*/
[[nodiscard]] bool supports_get_mem_info() const noexcept override { return false; }

private:
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
cuda_async_view_memory_resource pool_{};
@@ -232,19 +225,6 @@ class cuda_async_memory_resource final : public device_memory_resource {
return async_mr != nullptr;
#endif
}

/**
* @brief Get free and available memory for memory resource
*
* @throws rmm::cuda_error if unable to retrieve memory info.
*
* @return std::pair containing free_size and total_size of memory
*/
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
rmm::cuda_stream_view) const override
{
return std::make_pair(0, 0);
}
};

/** @} */ // end of group
20 changes: 0 additions & 20 deletions include/rmm/mr/device/cuda_async_view_memory_resource.hpp
@@ -99,13 +99,6 @@ class cuda_async_view_memory_resource final : public device_memory_resource {
*/
[[nodiscard]] bool supports_streams() const noexcept override { return true; }

/**
* @brief Query whether the resource supports the get_mem_info API.
*
* @return true
*/
[[nodiscard]] bool supports_get_mem_info() const noexcept override { return false; }

private:
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
cudaMemPool_t cuda_pool_handle_{};
@@ -169,19 +162,6 @@ class cuda_async_view_memory_resource final : public device_memory_resource {
{
return dynamic_cast<cuda_async_view_memory_resource const*>(&other) != nullptr;
}

/**
* @brief Get free and available memory for memory resource
*
* @throws rmm::cuda_error if unable to retrieve memory info.
*
* @return std::pair containing free_size and total_size of memory
*/
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
rmm::cuda_stream_view) const override
{
return std::make_pair(0, 0);
}
};

/** @} */ // end of group
24 changes: 1 addition & 23 deletions include/rmm/mr/device/cuda_memory_resource.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
* Copyright (c) 2019-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -51,13 +51,6 @@ class cuda_memory_resource final : public device_memory_resource {
*/
[[nodiscard]] bool supports_streams() const noexcept override { return false; }

/**
* @brief Query whether the resource supports the get_mem_info API.
*
* @return true
*/
[[nodiscard]] bool supports_get_mem_info() const noexcept override { return true; }

private:
/**
* @brief Allocates memory of size at least \p bytes.
@@ -108,21 +101,6 @@ class cuda_memory_resource final : public device_memory_resource {
{
return dynamic_cast<cuda_memory_resource const*>(&other) != nullptr;
}

/**
* @brief Get free and available memory for memory resource
*
* @throws rmm::cuda_error if unable to retrieve memory info.
*
* @return std::pair containing free_size and total_size of memory
*/
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(cuda_stream_view) const override
{
std::size_t free_size{};
std::size_t total_size{};
RMM_CUDA_TRY(cudaMemGetInfo(&free_size, &total_size));
return std::make_pair(free_size, total_size);
}
};
/** @} */ // end of group
} // namespace rmm::mr
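Note that this hunk deletes the only `do_get_mem_info()` override that returned real numbers; after this PR, `cuda_memory_resource::get_mem_info()` reports the `{0, 0}` default like every other resource. Code that needs actual device totals can make the same runtime query the deleted body made, sketched here:

```cpp
#include <rmm/detail/error.hpp>

#include <cuda_runtime_api.h>

#include <cstddef>
#include <utility>

// Equivalent of the deleted cuda_memory_resource::do_get_mem_info() body.
std::pair<std::size_t, std::size_t> query_device_memory()
{
  std::size_t free_size{};
  std::size_t total_size{};
  RMM_CUDA_TRY(cudaMemGetInfo(&free_size, &total_size));
  return {free_size, total_size};
}
```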
7 changes: 5 additions & 2 deletions include/rmm/mr/device/device_memory_resource.hpp
@@ -306,7 +306,7 @@ class device_memory_resource {
*
* @return bool true if the resource supports get_mem_info, false otherwise.
*/
[[nodiscard]] virtual bool supports_get_mem_info() const noexcept = 0;
[[nodiscard]] virtual bool supports_get_mem_info() const noexcept { return false; };

/**
* @brief Queries the amount of free and total memory for the resource.
@@ -384,7 +384,10 @@
* @return std::pair with available and free memory for resource
*/
[[nodiscard]] virtual std::pair<std::size_t, std::size_t> do_get_mem_info(
cuda_stream_view stream) const = 0;
cuda_stream_view stream) const
{
return {0, 0};
}
};
static_assert(cuda::mr::async_resource_with<device_memory_resource, cuda::mr::device_accessible>);
/** @} */ // end of group
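This is the core of the change: both hooks keep their signatures but gain defaults, so every derived resource compiles without overriding them and callers simply get an empty answer. A usage sketch of the resulting behavior (assuming the overrides removed elsewhere in this diff):

```cpp
#include <rmm/cuda_stream_view.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>

#include <cassert>

int main()
{
  rmm::mr::cuda_memory_resource mr;

  // With its overrides removed above, the base-class defaults now apply:
  assert(!mr.supports_get_mem_info());
  auto const [free, total] = mr.get_mem_info(rmm::cuda_stream_default);
  assert(free == 0 && total == 0);
  return 0;
}
```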
26 changes: 1 addition & 25 deletions include/rmm/mr/device/failure_callback_resource_adaptor.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
* Copyright (c) 2020-2024, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -135,16 +135,6 @@ class failure_callback_resource_adaptor final : public device_memory_resource {
return upstream_->supports_streams();
}

/**
* @brief Query whether the resource supports the get_mem_info API.
*
* @return bool true if the upstream resource supports get_mem_info, false otherwise.
*/
[[nodiscard]] bool supports_get_mem_info() const noexcept override
{
return upstream_->supports_get_mem_info();
}

private:
/**
* @brief Allocates memory of size at least `bytes` using the upstream
@@ -199,20 +189,6 @@ class failure_callback_resource_adaptor final : public device_memory_resource {
: upstream_->is_equal(other);
}

/**
* @brief Get free and available memory from upstream resource.
*
* @throws rmm::cuda_error if unable to retrieve memory info.
*
* @param stream Stream on which to get the mem info.
* @return std::pair containing free_size and total_size of memory
*/
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
cuda_stream_view stream) const override
{
return upstream_->get_mem_info(stream);
}

Upstream* upstream_; // the upstream resource used for satisfying allocation requests
failure_callback_t callback_;
void* callback_arg_;
23 changes: 1 addition & 22 deletions include/rmm/mr/device/fixed_size_memory_resource.hpp
@@ -104,13 +104,6 @@ class fixed_size_memory_resource
*/
[[nodiscard]] bool supports_streams() const noexcept override { return true; }

/**
* @brief Query whether the resource supports the get_mem_info API.
*
* @return false
*/
[[nodiscard]] bool supports_get_mem_info() const noexcept override { return false; }

/**
* @brief Get the upstream memory_resource object.
*
@@ -211,20 +204,6 @@ class fixed_size_memory_resource
return block_type{ptr};
}

/**
* @brief Get free and available memory for memory resource
*
* @throws std::runtime_error if we could not get free / total memory
*
* @param stream the stream being executed on
* @return std::pair with available and free memory for resource
*/
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
[[maybe_unused]] cuda_stream_view stream) const override
{
return std::make_pair(0, 0);
}

/**
* @brief free all memory allocated using the upstream resource.
*
@@ -244,7 +223,7 @@
{
lock_guard lock(this->get_mutex());

auto const [free, total] = get_upstream()->get_mem_info(rmm::cuda_stream_default);
auto const [free, total] = rmm::available_device_memory();
std::cout << "GPU free memory: " << free << " total: " << total << "\n";

std::cout << "upstream_blocks: " << upstream_blocks_.size() << "\n";