diff --git a/docs/root/version_history/current.rst b/docs/root/version_history/current.rst index 96f9d5c9bd98..61bdd6043fc2 100644 --- a/docs/root/version_history/current.rst +++ b/docs/root/version_history/current.rst @@ -15,7 +15,6 @@ Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* - Removed Config or Runtime ------------------------- *Normally occurs at the end of the* :ref:`deprecation period ` diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 516dd6aa7b4b..9e5593f9aeba 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -1729,6 +1729,12 @@ void ConnectionManagerImpl::ActiveStream::recreateStream( filter_state->parent(), StreamInfo::FilterState::LifeSpan::FilterChain); } + // Make sure that relevant information makes it from the original stream info + // to the new one. Generally this should consist of all downstream related + // data, and not include upstream related data. 
+ (*connection_manager_.streams_.begin()) + ->filter_manager_.streamInfo() + .setFromForRecreateStream(filter_manager_.streamInfo()); new_stream.decodeHeaders(std::move(request_headers_), !proxy_body); if (proxy_body) { // This functionality is currently only used for internal redirects, which the router only diff --git a/source/common/http/filter_manager.cc b/source/common/http/filter_manager.cc index c8f41af90239..5f0671be3770 100644 --- a/source/common/http/filter_manager.cc +++ b/source/common/http/filter_manager.cc @@ -327,7 +327,7 @@ Buffer::InstancePtr& ActiveStreamDecoderFilter::bufferedData() { return parent_.buffered_request_data_; } -bool ActiveStreamDecoderFilter::complete() { return parent_.state_.remote_decode_complete_; } +bool ActiveStreamDecoderFilter::complete() { return parent_.remoteDecodeComplete(); } void ActiveStreamDecoderFilter::doHeaders(bool end_stream) { parent_.decodeHeaders(this, *parent_.filter_manager_callbacks_.requestHeaders(), end_stream); @@ -843,9 +843,8 @@ void FilterManager::decodeMetadata(ActiveStreamDecoderFilter* filter, MetadataMa } void FilterManager::maybeEndDecode(bool end_stream) { - ASSERT(!state_.remote_decode_complete_); - state_.remote_decode_complete_ = end_stream; - if (end_stream) { + // If recreateStream is called, the HCM rewinds state and may send more encodeData calls. + if (end_stream && !remoteDecodeComplete()) { stream_info_.downstreamTiming().onLastDownstreamRxByteReceived(dispatcher().timeSource()); ENVOY_STREAM_LOG(debug, "request end stream", *this); } diff --git a/source/common/http/filter_manager.h b/source/common/http/filter_manager.h index 9fc0c99b0359..80df07ecb174 100644 --- a/source/common/http/filter_manager.h +++ b/source/common/http/filter_manager.h @@ -907,7 +907,10 @@ class FilterManager : public ScopeTrackedObject, /** * Whether remote processing has been marked as complete. 
*/ - bool remoteDecodeComplete() const { return state_.remote_decode_complete_; } + bool remoteDecodeComplete() const { + return stream_info_.downstreamTiming() && + stream_info_.downstreamTiming()->lastDownstreamRxByteReceived().has_value(); + } /** * Instructs the FilterManager to not create a filter chain. This makes it possible to issue @@ -1058,15 +1061,14 @@ class FilterManager : public ScopeTrackedObject, struct State { State() - : remote_encode_complete_(false), remote_decode_complete_(false), local_complete_(false), - has_1xx_headers_(false), created_filter_chain_(false), is_head_request_(false), - is_grpc_request_(false), non_100_response_headers_encoded_(false), - under_on_local_reply_(false), decoder_filter_chain_aborted_(false), - encoder_filter_chain_aborted_(false), saw_downstream_reset_(false) {} + : remote_encode_complete_(false), local_complete_(false), has_1xx_headers_(false), + created_filter_chain_(false), is_head_request_(false), is_grpc_request_(false), + non_100_response_headers_encoded_(false), under_on_local_reply_(false), + decoder_filter_chain_aborted_(false), encoder_filter_chain_aborted_(false), + saw_downstream_reset_(false) {} uint32_t filter_call_state_{0}; bool remote_encode_complete_ : 1; - bool remote_decode_complete_ : 1; bool local_complete_ : 1; // This indicates that local is complete prior to filter processing. // A filter can still stop the stream from being complete as seen // by the codec. 
diff --git a/source/common/http/http1/codec_impl.cc b/source/common/http/http1/codec_impl.cc index 92e828f1a36f..5f57f8a02c69 100644 --- a/source/common/http/http1/codec_impl.cc +++ b/source/common/http/http1/codec_impl.cc @@ -1203,8 +1203,10 @@ ParserStatus ServerConnectionImpl::onMessageCompleteBase() { } void ServerConnectionImpl::onResetStream(StreamResetReason reason) { - active_request_->response_encoder_.runResetCallbacks(reason); - connection_.dispatcher().deferredDelete(std::move(active_request_)); + if (active_request_) { + active_request_->response_encoder_.runResetCallbacks(reason); + connection_.dispatcher().deferredDelete(std::move(active_request_)); + } } Status ServerConnectionImpl::sendProtocolError(absl::string_view details) { diff --git a/source/common/runtime/runtime_features.cc b/source/common/runtime/runtime_features.cc index 95855fc4495e..b5ee64e960cc 100644 --- a/source/common/runtime/runtime_features.cc +++ b/source/common/runtime/runtime_features.cc @@ -40,6 +40,7 @@ RUNTIME_GUARD(envoy_reloadable_features_correctly_validate_alpn); RUNTIME_GUARD(envoy_reloadable_features_deprecate_global_ints); RUNTIME_GUARD(envoy_reloadable_features_disable_tls_inspector_injection); RUNTIME_GUARD(envoy_reloadable_features_do_not_await_headers_on_upstream_timeout_to_emit_stats); +RUNTIME_GUARD(envoy_reloadable_features_enable_compression_bomb_protection); RUNTIME_GUARD(envoy_reloadable_features_enable_grpc_async_client_cache); RUNTIME_GUARD(envoy_reloadable_features_fix_added_trailers); RUNTIME_GUARD(envoy_reloadable_features_handle_stream_reset_during_hcm_encoding); diff --git a/source/common/stream_info/stream_info_impl.h b/source/common/stream_info/stream_info_impl.h index 71c2da970df5..b6ea12c33f60 100644 --- a/source/common/stream_info/stream_info_impl.h +++ b/source/common/stream_info/stream_info_impl.h @@ -308,9 +308,23 @@ struct StreamInfoImpl : public StreamInfo { ASSERT(downstream_bytes_meter_.get() == downstream_bytes_meter.get()); } + // This 
function is used to persist relevant information from the original + stream into the new one, when recreating the stream. Generally this + includes information about the downstream stream, but not the upstream + stream. + void setFromForRecreateStream(StreamInfo& info) { + downstream_timing_ = info.downstreamTiming(); + protocol_ = info.protocol(); + bytes_received_ = info.bytesReceived(); + downstream_bytes_meter_ = info.getDownstreamBytesMeter(); + // These two are set in the constructor, but to T(recreate), and should be T(create) + start_time_ = info.startTime(); + start_time_monotonic_ = info.startTimeMonotonic(); + } + TimeSource& time_source_; - const SystemTime start_time_; - const MonotonicTime start_time_monotonic_; + SystemTime start_time_; + MonotonicTime start_time_monotonic_; absl::optional final_time_; absl::optional protocol_; diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 4090ba9dc082..6a8382a825ed 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -815,10 +815,17 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onGoAway( // Even if we have active health check probe, fail it on GOAWAY and schedule new one. if (request_encoder_) { handleFailure(envoy::data::core::v3::NETWORK); - expect_reset_ = true; - request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); + // request_encoder_ can already be destroyed if the host was removed during the failure callback + // above. + if (request_encoder_ != nullptr) { + expect_reset_ = true; + request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); + } + } + // client_ can already be destroyed if the host was removed during the failure callback above. 
+ if (client_ != nullptr) { + client_->close(); } - client_->close(); } bool GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::isHealthCheckSucceeded( @@ -852,12 +859,17 @@ void GrpcHealthCheckerImpl::GrpcActiveHealthCheckSession::onRpcComplete( if (end_stream) { resetState(); } else { - // resetState() will be called by onResetStream(). - expect_reset_ = true; - request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); + // request_encoder_ can already be destroyed if the host was removed during the failure callback + // above. + if (request_encoder_ != nullptr) { + // resetState() will be called by onResetStream(). + expect_reset_ = true; + request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset); + } } - if (!parent_.reuse_connection_ || goaway) { + // client_ can already be destroyed if the host was removed during the failure callback above. + if (client_ != nullptr && (!parent_.reuse_connection_ || goaway)) { client_->close(); } } diff --git a/source/extensions/compression/brotli/common/base.cc b/source/extensions/compression/brotli/common/base.cc index fd364124962c..edbd9be90d9f 100644 --- a/source/extensions/compression/brotli/common/base.cc +++ b/source/extensions/compression/brotli/common/base.cc @@ -6,9 +6,10 @@ namespace Compression { namespace Brotli { namespace Common { -BrotliContext::BrotliContext(const uint32_t chunk_size) - : chunk_size_{chunk_size}, chunk_ptr_{std::make_unique(chunk_size)}, next_in_{}, - next_out_{chunk_ptr_.get()}, avail_in_{0}, avail_out_{chunk_size} {} +BrotliContext::BrotliContext(uint32_t chunk_size, uint32_t max_output_size) + : max_output_size_{max_output_size}, chunk_size_{chunk_size}, + chunk_ptr_{std::make_unique(chunk_size)}, next_in_{}, next_out_{chunk_ptr_.get()}, + avail_in_{0}, avail_out_{chunk_size} {} void BrotliContext::updateOutput(Buffer::Instance& output_buffer) { if (avail_out_ == 0) { diff --git a/source/extensions/compression/brotli/common/base.h 
b/source/extensions/compression/brotli/common/base.h index fed019c9a297..929081c6ef7f 100644 --- a/source/extensions/compression/brotli/common/base.h +++ b/source/extensions/compression/brotli/common/base.h @@ -12,11 +12,12 @@ namespace Common { // Keeps a `Brotli` compression stream's state. struct BrotliContext { - BrotliContext(const uint32_t chunk_size); + BrotliContext(uint32_t chunk_size, uint32_t max_output_size = 0); void updateOutput(Buffer::Instance& output_buffer); void finalizeOutput(Buffer::Instance& output_buffer); + const uint32_t max_output_size_; const uint32_t chunk_size_; std::unique_ptr chunk_ptr_; const uint8_t* next_in_; diff --git a/source/extensions/compression/brotli/decompressor/BUILD b/source/extensions/compression/brotli/decompressor/BUILD index 252eb0e072ac..18155bfaae36 100644 --- a/source/extensions/compression/brotli/decompressor/BUILD +++ b/source/extensions/compression/brotli/decompressor/BUILD @@ -19,6 +19,7 @@ envoy_cc_library( "//envoy/stats:stats_interface", "//envoy/stats:stats_macros", "//source/common/buffer:buffer_lib", + "//source/common/runtime:runtime_features_lib", "//source/extensions/compression/brotli/common:brotli_base_lib", ], ) diff --git a/source/extensions/compression/brotli/decompressor/brotli_decompressor_impl.cc b/source/extensions/compression/brotli/decompressor/brotli_decompressor_impl.cc index adc2dbb9c731..eb1bb144baa5 100644 --- a/source/extensions/compression/brotli/decompressor/brotli_decompressor_impl.cc +++ b/source/extensions/compression/brotli/decompressor/brotli_decompressor_impl.cc @@ -2,12 +2,24 @@ #include +#include "source/common/runtime/runtime_features.h" + namespace Envoy { namespace Extensions { namespace Compression { namespace Brotli { namespace Decompressor { +namespace { + +// How many times the output buffer is allowed to be bigger than the input +// buffer. This value is used to detect compression bombs. 
+// TODO(rojkov): Re-design the Decompressor interface to handle compression +// bombs gracefully instead of this quick solution. +constexpr uint32_t MaxInflateRatio = 100; + +} // namespace + BrotliDecompressorImpl::BrotliDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix, const uint32_t chunk_size, const bool disable_ring_buffer_reallocation) @@ -22,7 +34,7 @@ BrotliDecompressorImpl::BrotliDecompressorImpl(Stats::Scope& scope, const std::s void BrotliDecompressorImpl::decompress(const Buffer::Instance& input_buffer, Buffer::Instance& output_buffer) { - Common::BrotliContext ctx(chunk_size_); + Common::BrotliContext ctx(chunk_size_, MaxInflateRatio * input_buffer.length()); for (const Buffer::RawSlice& input_slice : input_buffer.getRawSlices()) { ctx.avail_in_ = input_slice.len_; @@ -58,6 +70,13 @@ bool BrotliDecompressorImpl::process(Common::BrotliContext& ctx, Buffer::Instanc return false; } + if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.enable_compression_bomb_protection") && + (output_buffer.length() > ctx.max_output_size_)) { + stats_.brotli_error_.inc(); + return false; + } + ctx.updateOutput(output_buffer); return true; diff --git a/source/extensions/compression/gzip/common/base.h b/source/extensions/compression/gzip/common/base.h index f8b89cb25335..4f427fb90985 100644 --- a/source/extensions/compression/gzip/common/base.h +++ b/source/extensions/compression/gzip/common/base.h @@ -12,7 +12,6 @@ namespace Zlib { /** * Shared code between the compressor and the decompressor. */ -// TODO(junr03): move to extensions tree once the compressor side is moved to extensions. 
class Base { public: Base(uint64_t chunk_size, std::function zstream_deleter); diff --git a/source/extensions/compression/gzip/decompressor/BUILD b/source/extensions/compression/gzip/decompressor/BUILD index 13939fdcbf2c..2090f49bcd68 100644 --- a/source/extensions/compression/gzip/decompressor/BUILD +++ b/source/extensions/compression/gzip/decompressor/BUILD @@ -21,6 +21,7 @@ envoy_cc_library( "//source/common/buffer:buffer_lib", "//source/common/common:assert_lib", "//source/common/common:minimal_logger_lib", + "//source/common/runtime:runtime_features_lib", "//source/extensions/compression/gzip/common:zlib_base_lib", ], ) diff --git a/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.cc b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.cc index 966730c23880..0932638315d1 100644 --- a/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.cc +++ b/source/extensions/compression/gzip/decompressor/zlib_decompressor_impl.cc @@ -7,6 +7,7 @@ #include "envoy/common/exception.h" #include "source/common/common/assert.h" +#include "source/common/runtime/runtime_features.h" #include "absl/container/fixed_array.h" @@ -16,6 +17,16 @@ namespace Compression { namespace Gzip { namespace Decompressor { +namespace { + +// How many times the output buffer is allowed to be bigger than the size of +// accumulated input. This value is used to detect compression bombs. +// TODO(rojkov): Re-design the Decompressor interface to handle compression +// bombs gracefully instead of this quick solution. 
+constexpr uint64_t MaxInflateRatio = 100; + +} // namespace + ZlibDecompressorImpl::ZlibDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix) : ZlibDecompressorImpl(scope, stats_prefix, 4096) {} @@ -43,6 +54,8 @@ void ZlibDecompressorImpl::init(int64_t window_bits) { void ZlibDecompressorImpl::decompress(const Buffer::Instance& input_buffer, Buffer::Instance& output_buffer) { + uint64_t limit = MaxInflateRatio * input_buffer.length(); + for (const Buffer::RawSlice& input_slice : input_buffer.getRawSlices()) { zstream_ptr_->avail_in = input_slice.len_; zstream_ptr_->next_in = static_cast(input_slice.mem_); @@ -50,6 +63,17 @@ void ZlibDecompressorImpl::decompress(const Buffer::Instance& input_buffer, if (zstream_ptr_->avail_out == 0) { updateOutput(output_buffer); } + + if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.enable_compression_bomb_protection") && + (output_buffer.length() > limit)) { + stats_.zlib_data_error_.inc(); + ENVOY_LOG(trace, + "excessive decompression ratio detected: output " + "size {} for input size {}", + output_buffer.length(), input_buffer.length()); + return; + } } } diff --git a/source/extensions/compression/zstd/decompressor/zstd_decompressor_impl.cc b/source/extensions/compression/zstd/decompressor/zstd_decompressor_impl.cc index eb81e4a8587c..7a165da1973c 100644 --- a/source/extensions/compression/zstd/decompressor/zstd_decompressor_impl.cc +++ b/source/extensions/compression/zstd/decompressor/zstd_decompressor_impl.cc @@ -1,11 +1,23 @@ #include "source/extensions/compression/zstd/decompressor/zstd_decompressor_impl.h" +#include "source/common/runtime/runtime_features.h" + namespace Envoy { namespace Extensions { namespace Compression { namespace Zstd { namespace Decompressor { +namespace { + +// How many times the output buffer is allowed to be bigger than the size of +// accumulated input. This value is used to detect compression bombs. 
+// TODO(rojkov): Re-design the Decompressor interface to handle compression +// bombs gracefully instead of this quick solution. +constexpr uint64_t MaxInflateRatio = 100; + +} // namespace + ZstdDecompressorImpl::ZstdDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix, const ZstdDDictManagerPtr& ddict_manager, uint32_t chunk_size) @@ -14,6 +26,8 @@ ZstdDecompressorImpl::ZstdDecompressorImpl(Stats::Scope& scope, const std::strin void ZstdDecompressorImpl::decompress(const Buffer::Instance& input_buffer, Buffer::Instance& output_buffer) { + uint64_t limit = MaxInflateRatio * input_buffer.length(); + for (const Buffer::RawSlice& input_slice : input_buffer.getRawSlices()) { if (input_slice.len_ > 0) { if (ddict_manager_ && !is_dictionary_set_) { @@ -38,6 +52,16 @@ void ZstdDecompressorImpl::decompress(const Buffer::Instance& input_buffer, if (!process(output_buffer)) { return; } + if (Runtime::runtimeFeatureEnabled( + "envoy.reloadable_features.enable_compression_bomb_protection") && + (output_buffer.length() > limit)) { + stats_.zstd_generic_error_.inc(); + ENVOY_LOG(trace, + "excessive decompression ratio detected: output " + "size {} for input size {}", + output_buffer.length(), input_buffer.length()); + return; + } } } } diff --git a/source/extensions/compression/zstd/decompressor/zstd_decompressor_impl.h b/source/extensions/compression/zstd/decompressor/zstd_decompressor_impl.h index 0fa777bea6d7..597a8c72ad0e 100644 --- a/source/extensions/compression/zstd/decompressor/zstd_decompressor_impl.h +++ b/source/extensions/compression/zstd/decompressor/zstd_decompressor_impl.h @@ -4,6 +4,7 @@ #include "envoy/stats/scope.h" #include "envoy/stats/stats_macros.h" +#include "source/common/common/logger.h" #include "source/extensions/compression/zstd/common/base.h" #include "source/extensions/compression/zstd/common/dictionary_manager.h" @@ -40,6 +41,7 @@ struct ZstdDecompressorStats { */ class ZstdDecompressorImpl : public Common::Base, public 
Envoy::Compression::Decompressor::Decompressor, + public Logger::Loggable, NonCopyable { public: ZstdDecompressorImpl(Stats::Scope& scope, const std::string& stats_prefix, diff --git a/source/extensions/filters/http/oauth2/filter.cc b/source/extensions/filters/http/oauth2/filter.cc index 446e3394de77..9ef2f455b235 100644 --- a/source/extensions/filters/http/oauth2/filter.cc +++ b/source/extensions/filters/http/oauth2/filter.cc @@ -203,31 +203,6 @@ const std::string& OAuth2Filter::bearerPrefix() const { CONSTRUCT_ON_FIRST_USE(std::string, "bearer "); } -std::string OAuth2Filter::extractAccessToken(const Http::RequestHeaderMap& headers) const { - ASSERT(headers.Path() != nullptr); - - // Start by looking for a bearer token in the Authorization header. - const Http::HeaderEntry* authorization = headers.getInline(authorization_handle.handle()); - if (authorization != nullptr) { - const auto value = StringUtil::trim(authorization->value().getStringView()); - const auto& bearer_prefix = bearerPrefix(); - if (absl::StartsWithIgnoreCase(value, bearer_prefix)) { - const size_t start = bearer_prefix.length(); - return std::string(StringUtil::ltrim(value.substr(start))); - } - } - - // Check for the named query string parameter. - const auto path = headers.Path()->value().getStringView(); - const auto params = Http::Utility::parseQueryString(path); - const auto param = params.find("token"); - if (param != params.end()) { - return param->second; - } - - return EMPTY_STRING; -} - /** * primary cases: * 1) user is signing out @@ -236,6 +211,10 @@ std::string OAuth2Filter::extractAccessToken(const Http::RequestHeaderMap& heade * 4) user is unauthorized */ Http::FilterHeadersStatus OAuth2Filter::decodeHeaders(Http::RequestHeaderMap& headers, bool) { + // Sanitize the Authorization header, since we have no way to validate its content. Also, + // if token forwarding is enabled, this header will be set based on what is on the HMAC cookie + // before forwarding the request upstream. 
+ headers.removeInline(authorization_handle.handle()); // The following 2 headers are guaranteed for regular requests. The asserts are helpful when // writing test code to not forget these important variables in mock requests @@ -290,17 +269,7 @@ Http::FilterHeadersStatus OAuth2Filter::decodeHeaders(Http::RequestHeaderMap& he request_headers_ = &headers; } - // If a bearer token is supplied as a header or param, we ingest it here and kick off the - // user resolution immediately. Note this comes after HMAC validation, so technically this - // header is sanitized in a way, as the validation check forces the correct Bearer Cookie value. - access_token_ = extractAccessToken(headers); - if (!access_token_.empty()) { - found_bearer_token_ = true; - finishFlow(); - return Http::FilterHeadersStatus::Continue; - } - - // If no access token and this isn't the callback URI, redirect to acquire credentials. + // If this isn't the callback URI, redirect to acquire credentials. // // The following conditional could be replaced with a regex pattern-match, // if we're concerned about strict matching against the callback path. @@ -439,18 +408,6 @@ void OAuth2Filter::onGetAccessTokenSuccess(const std::string& access_code, } void OAuth2Filter::finishFlow() { - - // We have fully completed the entire OAuth flow, whether through Authorization header or from - // user redirection to the auth server. 
- if (found_bearer_token_) { - if (config_->forwardBearerToken()) { - setBearerToken(*request_headers_, access_token_); - } - config_->stats().oauth_success_.inc(); - decoder_callbacks_->continueDecoding(); - return; - } - std::string token_payload; if (config_->forwardBearerToken()) { token_payload = absl::StrCat(host_, new_expires_, access_token_, id_token_, refresh_token_); @@ -472,8 +429,8 @@ void OAuth2Filter::finishFlow() { const std::string cookie_tail_http_only = fmt::format(CookieTailHttpOnlyFormatString, new_expires_); - // At this point we have all of the pieces needed to authorize a user that did not originally - // have a bearer access token. Now, we construct a redirect request to return the user to their + // At this point we have all of the pieces needed to authorize a user. + // Now, we construct a redirect request to return the user to their // previous state and additionally set the OAuth cookies in browser. // The redirection should result in successfully passing this filter. 
Http::ResponseHeaderMapPtr response_headers{Http::createHeaderMap( @@ -509,7 +466,6 @@ void OAuth2Filter::finishFlow() { decoder_callbacks_->encodeHeaders(std::move(response_headers), true, REDIRECT_LOGGED_IN); config_->stats().oauth_success_.inc(); - decoder_callbacks_->continueDecoding(); } void OAuth2Filter::sendUnauthorizedResponse() { diff --git a/source/extensions/filters/http/oauth2/filter.h b/source/extensions/filters/http/oauth2/filter.h index 06838a5b65ff..c7d10dddeb82 100644 --- a/source/extensions/filters/http/oauth2/filter.h +++ b/source/extensions/filters/http/oauth2/filter.h @@ -243,7 +243,6 @@ class OAuth2Filter : public Http::PassThroughDecoderFilter, public FilterCallbac std::string new_expires_; absl::string_view host_; std::string state_; - bool found_bearer_token_{false}; Http::RequestHeaderMap* request_headers_{nullptr}; std::unique_ptr oauth_client_; @@ -257,7 +256,6 @@ class OAuth2Filter : public Http::PassThroughDecoderFilter, public FilterCallbac Http::FilterHeadersStatus signOutUser(const Http::RequestHeaderMap& headers); const std::string& bearerPrefix() const; - std::string extractAccessToken(const Http::RequestHeaderMap& headers) const; }; } // namespace Oauth2 diff --git a/source/extensions/filters/http/oauth2/oauth_client.cc b/source/extensions/filters/http/oauth2/oauth_client.cc index 88f935bec92b..97e50fb6ea73 100644 --- a/source/extensions/filters/http/oauth2/oauth_client.cc +++ b/source/extensions/filters/http/oauth2/oauth_client.cc @@ -21,9 +21,6 @@ namespace HttpFilters { namespace Oauth2 { namespace { -Http::RegisterCustomInlineHeader - authorization_handle(Http::CustomHeaders::get().Authorization); - constexpr const char* GetAccessTokenBodyFormatString = "grant_type=authorization_code&code={0}&client_id={1}&client_secret={2}&redirect_uri={3}"; diff --git a/test/common/stream_info/stream_info_impl_test.cc b/test/common/stream_info/stream_info_impl_test.cc index 419c9e9d9b2a..e3a4692f3f73 100644 --- 
a/test/common/stream_info/stream_info_impl_test.cc +++ b/test/common/stream_info/stream_info_impl_test.cc @@ -219,6 +219,34 @@ TEST_F(StreamInfoImplTest, MiscSettersAndGetters) { } } +TEST_F(StreamInfoImplTest, SetFrom) { + StreamInfoImpl s1(Http::Protocol::Http2, test_time_.timeSystem(), nullptr); + + s1.addBytesReceived(1); + s1.downstreamTiming().onLastDownstreamRxByteReceived(test_time_.timeSystem()); + +#ifdef __clang__ +#if defined(__linux__) +#if defined(__has_feature) && !(__has_feature(thread_sanitizer)) + ASSERT_TRUE(sizeof(s1) == 760 || sizeof(s1) == 776 || sizeof(s1) == 800) + << "If adding fields to StreamInfoImpl, please check to see if you " + "need to add them to setFromForRecreateStream! Current size " + << sizeof(s1); +#endif +#endif +#endif + + StreamInfoImpl s2(Http::Protocol::Http11, test_time_.timeSystem(), nullptr); + s2.setFromForRecreateStream(s1); + EXPECT_EQ(s1.startTime(), s2.startTime()); + EXPECT_EQ(s1.startTimeMonotonic(), s2.startTimeMonotonic()); + EXPECT_EQ(s1.downstreamTiming().lastDownstreamRxByteReceived(), + s2.downstreamTiming().lastDownstreamRxByteReceived()); + EXPECT_EQ(s1.protocol(), s2.protocol()); + EXPECT_EQ(s1.bytesReceived(), s2.bytesReceived()); + EXPECT_EQ(s1.getDownstreamBytesMeter(), s2.getDownstreamBytesMeter()); +} + TEST_F(StreamInfoImplTest, DynamicMetadataTest) { StreamInfoImpl stream_info(Http::Protocol::Http2, test_time_.timeSystem(), nullptr); diff --git a/test/common/upstream/health_checker_impl_test.cc b/test/common/upstream/health_checker_impl_test.cc index e8ec25a14a6e..0aedf94a6e5e 100644 --- a/test/common/upstream/health_checker_impl_test.cc +++ b/test/common/upstream/health_checker_impl_test.cc @@ -4737,6 +4737,70 @@ TEST_F(GrpcHealthCheckerImplTest, SuccessStartFailedFailFirst) { expectHostHealthy(true); } +// Verify functionality when a host is removed inline with a failure via RPC that was preceded +// by a GOAWAY. 
+TEST_F(GrpcHealthCheckerImplTest, GrpcHealthFailViaRpcRemoveHostInCallback) { + setupHC(); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; + + expectSessionCreate(); + expectHealthcheckStart(0); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + health_checker_->start(); + + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)) + .WillOnce(Invoke([&](HostSharedPtr host, HealthTransition) { + cluster_->prioritySet().getMockHostSet(0)->hosts_ = {}; + cluster_->prioritySet().runUpdateCallbacks(0, {}, {host}); + })); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::NoError); + respondServiceStatus(0, grpc::health::v1::HealthCheckResponse::NOT_SERVING); +} + +// Verify functionality when a host is removed inline with a failure via an error GOAWAY. +TEST_F(GrpcHealthCheckerImplTest, GrpcHealthFailViaGoawayRemoveHostInCallback) { + setupHCWithUnhealthyThreshold(/*threshold=*/1); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; + + expectSessionCreate(); + expectHealthcheckStart(0); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + health_checker_->start(); + + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)) + .WillOnce(Invoke([&](HostSharedPtr host, HealthTransition) { + cluster_->prioritySet().getMockHostSet(0)->hosts_ = {}; + cluster_->prioritySet().runUpdateCallbacks(0, {}, {host}); + })); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + test_sessions_[0]->codec_client_->raiseGoAway(Http::GoAwayErrorCode::Other); +} + +// Verify functionality when a host is removed inline by a bad RPC response. 
+TEST_F(GrpcHealthCheckerImplTest, GrpcHealthFailViaBadResponseRemoveHostInCallback) { + setupHCWithUnhealthyThreshold(/*threshold=*/1); + cluster_->prioritySet().getMockHostSet(0)->hosts_ = { + makeTestHost(cluster_->info_, "tcp://127.0.0.1:80", simTime())}; + + expectSessionCreate(); + expectHealthcheckStart(0); + EXPECT_CALL(event_logger_, logUnhealthy(_, _, _, true)); + health_checker_->start(); + + EXPECT_CALL(*this, onHostStatus(_, HealthTransition::Changed)) + .WillOnce(Invoke([&](HostSharedPtr host, HealthTransition) { + cluster_->prioritySet().getMockHostSet(0)->hosts_ = {}; + cluster_->prioritySet().runUpdateCallbacks(0, {}, {host}); + })); + EXPECT_CALL(event_logger_, logEjectUnhealthy(_, _, _)); + std::unique_ptr response_headers( + new Http::TestResponseHeaderMapImpl{{":status", "500"}}); + test_sessions_[0]->stream_response_callbacks_->decodeHeaders(std::move(response_headers), false); +} + // Test host recovery after explicit check failure requires several successful checks. TEST_F(GrpcHealthCheckerImplTest, GrpcHealthFail) { setupHC(); diff --git a/test/extensions/compression/brotli/decompressor/brotli_decompressor_impl_test.cc b/test/extensions/compression/brotli/decompressor/brotli_decompressor_impl_test.cc index c03884f35f9f..57102b7e838e 100644 --- a/test/extensions/compression/brotli/decompressor/brotli_decompressor_impl_test.cc +++ b/test/extensions/compression/brotli/decompressor/brotli_decompressor_impl_test.cc @@ -25,6 +25,32 @@ class BrotliDecompressorImplTest : public testing::Test { static constexpr uint32_t default_input_size{796}; }; +// Detect excessive compression ratio by compressing a long whitespace string +// into a very small chunk of data and decompressing it again. 
+TEST_F(BrotliDecompressorImplTest, DetectExcessiveCompressionRatio) { + const absl::string_view ten_whitespaces = " "; + Brotli::Compressor::BrotliCompressorImpl compressor{ + default_quality, + default_window_bits, + default_input_block_bits, + false, + Brotli::Compressor::BrotliCompressorImpl::EncoderMode::Default, + 4096}; + Buffer::OwnedImpl buffer; + + for (int i = 0; i < 1000; i++) { + buffer.add(ten_whitespaces); + } + + compressor.compress(buffer, Envoy::Compression::Compressor::State::Finish); + + Buffer::OwnedImpl output_buffer; + Stats::IsolatedStoreImpl stats_store{}; + BrotliDecompressorImpl decompressor{stats_store, "test.", 16, false}; + decompressor.decompress(buffer, output_buffer); + EXPECT_EQ(1, stats_store.counterFromString("test.brotli_error").value()); +} + // Exercises compression and decompression by compressing some data, decompressing it and then // comparing compressor's input/checksum with decompressor's output/checksum. TEST_F(BrotliDecompressorImplTest, CompressAndDecompress) { diff --git a/test/extensions/compression/gzip/compressor_fuzz_test.cc b/test/extensions/compression/gzip/compressor_fuzz_test.cc index 73c592cb1f81..c745abb7a9a2 100644 --- a/test/extensions/compression/gzip/compressor_fuzz_test.cc +++ b/test/extensions/compression/gzip/compressor_fuzz_test.cc @@ -71,8 +71,10 @@ DEFINE_FUZZER(const uint8_t* buf, size_t len) { : Envoy::Compression::Compressor::State::Flush); decompressor.decompress(buffer, full_output); } - RELEASE_ASSERT(full_input.toString() == full_output.toString(), ""); - RELEASE_ASSERT(compressor.checksum() == decompressor.checksum(), ""); + if (stats_store.counterFromString("test.zlib_data_error").value() == 0) { + RELEASE_ASSERT(full_input.toString() == full_output.toString(), ""); + RELEASE_ASSERT(compressor.checksum() == decompressor.checksum(), ""); + } } } // namespace Fuzz diff --git a/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc 
b/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc index 0346118a2b22..b895415f8870 100644 --- a/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc +++ b/test/extensions/compression/gzip/decompressor/zlib_decompressor_impl_test.cc @@ -122,6 +122,31 @@ TEST_F(ZlibDecompressorImplTest, CallingChecksum) { ASSERT_EQ(0, decompressor.decompression_error_); } +// Detect excessive compression ratio by compressing a long whitespace string +// into a very small chunk of data and decompressing it again. +TEST_F(ZlibDecompressorImplTest, DetectExcessiveCompressionRatio) { + const absl::string_view ten_whitespaces = " "; + Buffer::OwnedImpl buffer; + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl compressor; + compressor.init( + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionLevel::Standard, + Extensions::Compression::Gzip::Compressor::ZlibCompressorImpl::CompressionStrategy::Standard, + gzip_window_bits, memory_level); + + for (int i = 0; i < 1000; i++) { + buffer.add(ten_whitespaces); + } + + compressor.compress(buffer, Envoy::Compression::Compressor::State::Finish); + + Buffer::OwnedImpl output_buffer; + Stats::IsolatedStoreImpl stats_store{}; + ZlibDecompressorImpl decompressor{stats_store, "test."}; + decompressor.init(gzip_window_bits); + decompressor.decompress(buffer, output_buffer); + ASSERT_EQ(stats_store.counterFromString("test.zlib_data_error").value(), 1); +} + // Exercises compression and decompression by compressing some data, decompressing it and then // comparing compressor's input/checksum with decompressor's output/checksum. 
TEST_F(ZlibDecompressorImplTest, CompressAndDecompress) { diff --git a/test/extensions/compression/zstd/decompressor/zstd_decompressor_impl_test.cc b/test/extensions/compression/zstd/decompressor/zstd_decompressor_impl_test.cc index 0ab00b1cde9b..be14e14ec11a 100644 --- a/test/extensions/compression/zstd/decompressor/zstd_decompressor_impl_test.cc +++ b/test/extensions/compression/zstd/decompressor/zstd_decompressor_impl_test.cc @@ -149,6 +149,27 @@ TEST_F(ZstdDecompressorImplTest, IllegalConfig) { "assert failure: id != 0. Details: Illegal Zstd dictionary"); } +// Detect excessive compression ratio by compressing a long whitespace string +// into a very small chunk of data and decompressing it again. +TEST_F(ZstdDecompressorImplTest, DetectExcessiveCompressionRatio) { + const absl::string_view ten_whitespaces = " "; + Buffer::OwnedImpl buffer; + for (int i = 0; i < 1000; i++) { + buffer.add(ten_whitespaces); + } + + Zstd::Compressor::ZstdCompressorImpl compressor{default_compression_level_, + default_enable_checksum_, default_strategy_, + default_cdict_manager_, 4096}; + compressor.compress(buffer, Envoy::Compression::Compressor::State::Finish); + + Buffer::OwnedImpl output_buffer; + Stats::IsolatedStoreImpl stats_store{}; + ZstdDecompressorImpl decompressor{stats_store, "test.", default_ddict_manager_, 16}; + decompressor.decompress(buffer, output_buffer); + ASSERT_EQ(stats_store.counterFromString("test.zstd_generic_error").value(), 1); +} + } // namespace // Copy from diff --git a/test/extensions/filters/http/oauth2/filter_test.cc b/test/extensions/filters/http/oauth2/filter_test.cc index 80445e0effdc..74586168404c 100644 --- a/test/extensions/filters/http/oauth2/filter_test.cc +++ b/test/extensions/filters/http/oauth2/filter_test.cc @@ -95,7 +95,7 @@ class OAuth2Test : public testing::Test { } // Set up proto fields with standard config. 
- FilterConfigSharedPtr getConfig() { + FilterConfigSharedPtr getConfig(bool forward_bearer_token = true) { envoy::extensions::filters::http::oauth2::v3::OAuth2Config p; auto* endpoint = p.mutable_token_endpoint(); endpoint->set_cluster("auth.example.com"); @@ -105,7 +105,7 @@ class OAuth2Test : public testing::Test { p.mutable_redirect_path_matcher()->mutable_path()->set_exact(TEST_CALLBACK); p.set_authorization_endpoint("https://auth.example.com/oauth/authorize/"); p.mutable_signout_path()->mutable_path()->set_exact("/_signout"); - p.set_forward_bearer_token(true); + p.set_forward_bearer_token(forward_bearer_token); p.add_auth_scopes("user"); p.add_auth_scopes("openid"); p.add_auth_scopes("email"); @@ -422,6 +422,50 @@ TEST_F(OAuth2Test, OAuthOkPass) { EXPECT_EQ(scope_.counterFromString("test.oauth_success").value(), 1); } +/** + * Scenario: The OAuth filter receives a request to an arbitrary path with valid OAuth cookies + * (cookie values and validation are mocked out), but with an invalid token in the Authorization + * header and forwarding bearer token is disabled. + * + * Expected behavior: the filter should sanitize the Authorization header and let the request + * proceed. 
+ */ +TEST_F(OAuth2Test, OAuthOkPassButInvalidToken) { + init(getConfig(false /* forward_bearer_token */)); + + Http::TestRequestHeaderMapImpl mock_request_headers{ + {Http::Headers::get().Path.get(), "/anypath"}, + {Http::Headers::get().Host.get(), "traffic.example.com"}, + {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, + {Http::Headers::get().Scheme.get(), "https"}, + {Http::CustomHeaders::get().Authorization.get(), "Bearer injected_malice!"}, + }; + + Http::TestRequestHeaderMapImpl expected_headers{ + {Http::Headers::get().Path.get(), "/anypath"}, + {Http::Headers::get().Host.get(), "traffic.example.com"}, + {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, + {Http::Headers::get().Scheme.get(), "https"}, + }; + + // cookie-validation mocking + EXPECT_CALL(*validator_, setParams(_, _)); + EXPECT_CALL(*validator_, isValid()).WillOnce(Return(true)); + + // Sanitized return reference mocking + std::string legit_token{"legit_token"}; + EXPECT_CALL(*validator_, token()).WillRepeatedly(ReturnRef(legit_token)); + + EXPECT_EQ(Http::FilterHeadersStatus::Continue, + filter_->decodeHeaders(mock_request_headers, false)); + + // Ensure that existing OAuth forwarded headers got sanitized. + EXPECT_EQ(mock_request_headers, expected_headers); + + EXPECT_EQ(scope_.counterFromString("test.oauth_failure").value(), 0); + EXPECT_EQ(scope_.counterFromString("test.oauth_success").value(), 1); +} + /** * Scenario: The OAuth filter receives a request without valid OAuth cookies to a non-callback URL * (indicating that the user needs to re-validate cookies or get 401'd). 
@@ -790,21 +834,12 @@ TEST_F(OAuth2Test, OAuthTestFullFlowPostWithParameters) { EXPECT_CALL(decoder_callbacks_, encodeHeaders_(HeaderMapEqualRef(&second_response_headers), true)); - EXPECT_CALL(decoder_callbacks_, continueDecoding()); filter_->finishFlow(); } TEST_F(OAuth2Test, OAuthBearerTokenFlowFromHeader) { - Http::TestRequestHeaderMapImpl request_headers_before{ - {Http::Headers::get().Path.get(), "/test?role=bearer"}, - {Http::Headers::get().Host.get(), "traffic.example.com"}, - {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, - {Http::Headers::get().Scheme.get(), "https"}, - {Http::CustomHeaders::get().Authorization.get(), "Bearer xyz-header-token"}, - }; - // Expected decoded headers after the callback & validation of the bearer token is complete. - Http::TestRequestHeaderMapImpl request_headers_after{ + Http::TestRequestHeaderMapImpl request_headers{ {Http::Headers::get().Path.get(), "/test?role=bearer"}, {Http::Headers::get().Host.get(), "traffic.example.com"}, {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, @@ -812,41 +847,28 @@ TEST_F(OAuth2Test, OAuthBearerTokenFlowFromHeader) { {Http::CustomHeaders::get().Authorization.get(), "Bearer xyz-header-token"}, }; - // Fail the validation to trigger the OAuth flow. + // Fail the validation. EXPECT_CALL(*validator_, setParams(_, _)); EXPECT_CALL(*validator_, isValid()).WillOnce(Return(false)); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, - filter_->decodeHeaders(request_headers_before, false)); - - // Finally, expect that the header map had OAuth information appended to it. 
- EXPECT_EQ(request_headers_before, request_headers_after); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers, false)); } TEST_F(OAuth2Test, OAuthBearerTokenFlowFromQueryParameters) { - Http::TestRequestHeaderMapImpl request_headers_before{ - {Http::Headers::get().Path.get(), "/test?role=bearer&token=xyz-queryparam-token"}, - {Http::Headers::get().Host.get(), "traffic.example.com"}, - {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, - {Http::Headers::get().Scheme.get(), "https"}, - }; - Http::TestRequestHeaderMapImpl request_headers_after{ + Http::TestRequestHeaderMapImpl request_headers{ {Http::Headers::get().Path.get(), "/test?role=bearer&token=xyz-queryparam-token"}, {Http::Headers::get().Host.get(), "traffic.example.com"}, {Http::Headers::get().Method.get(), Http::Headers::get().MethodValues.Get}, {Http::Headers::get().Scheme.get(), "https"}, - {Http::CustomHeaders::get().Authorization.get(), "Bearer xyz-queryparam-token"}, }; - // Fail the validation to trigger the OAuth flow. + // Fail the validation. EXPECT_CALL(*validator_, setParams(_, _)); EXPECT_CALL(*validator_, isValid()).WillOnce(Return(false)); - EXPECT_EQ(Http::FilterHeadersStatus::Continue, - filter_->decodeHeaders(request_headers_before, false)); - - // Expected decoded headers after the callback & validation of the bearer token is complete. - EXPECT_EQ(request_headers_before, request_headers_after); + EXPECT_EQ(Http::FilterHeadersStatus::StopIteration, + filter_->decodeHeaders(request_headers, false)); } } // namespace Oauth2 diff --git a/test/integration/cds_integration_test.cc b/test/integration/cds_integration_test.cc index 58c737f57afe..9c741350d42a 100644 --- a/test/integration/cds_integration_test.cc +++ b/test/integration/cds_integration_test.cc @@ -303,6 +303,52 @@ TEST_P(CdsIntegrationTest, TwoClusters) { cleanupUpstreamAndDownstream(); } +// Test internal redirect to a cluster removed during the backend think time. 
+TEST_P(CdsIntegrationTest, TwoClustersAndRedirects) { + setDownstreamProtocol(Http::CodecType::HTTP1); + config_helper_.addConfigModifier( + [](envoy::extensions::filters::network::http_connection_manager::v3::HttpConnectionManager& + hcm) { + auto* route = hcm.mutable_route_config()->mutable_virtual_hosts(0)->mutable_routes(1); + route->mutable_route() + ->mutable_internal_redirect_policy() + ->mutable_redirect_response_codes() + ->Add(302); + }); + + // Tell Envoy that cluster_2 is here. + initialize(); + sendDiscoveryResponse( + Config::TypeUrl::get().Cluster, {cluster1_, cluster2_}, {cluster2_}, {}, "42"); + // The '3' includes the fake CDS server. + test_server_->waitForGaugeGe("cluster_manager.active_clusters", 3); + // Tell Envoy that cluster_1 is gone. + sendDiscoveryResponse(Config::TypeUrl::get().Cluster, + {cluster2_}, {}, {ClusterName1}, "43"); + test_server_->waitForCounterGe("cluster_manager.cluster_removed", 1); + + codec_client_ = makeHttpConnection(makeClientConnection((lookupPort("http")))); + default_request_headers_.setPath("/cluster2"); + default_request_headers_.setContentLength("4"); + auto encoder_decoder = codec_client_->startRequest(default_request_headers_); + Buffer::OwnedImpl data("body"); + encoder_decoder.first.encodeData(data, true); + auto& response = encoder_decoder.second; + + ASSERT_TRUE(fake_upstreams_[UpstreamIndex2]->waitForHttpConnection(*dispatcher_, + fake_upstream_connection_)); + ASSERT_TRUE(fake_upstream_connection_->waitForNewStream(*dispatcher_, upstream_request_)); + ASSERT_TRUE(upstream_request_->waitForEndStream(*dispatcher_)); + + Http::TestResponseHeaderMapImpl redirect_response{ + {":status", "302"}, {"content-length", "0"}, {"location", "http://host/cluster1"}}; + + // Send a response to the original request redirecting to the deleted cluster. 
+ upstream_request_->encodeHeaders(redirect_response, true); + ASSERT_TRUE(response->waitForEndStream()); + EXPECT_EQ("503", response->headers().getStatusValue()); +} + // Tests that when Envoy's delta xDS stream dis/reconnects, Envoy can inform the server of the // resources it already has: the reconnected stream need not start with a state-of-the-world update. TEST_P(CdsIntegrationTest, VersionsRememberedAfterReconnect) {