diff --git a/.circleci/config.yml b/.circleci/config.yml index acd03f9460..1dc4843759 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -48,14 +48,14 @@ executors: # See https://circleci.com/docs/xcode-policy along with the support matrix # at https://circleci.com/docs/using-macos#supported-xcode-versions. # We use the major.minor notation to bring in compatible patches. - xcode: 14.2 + xcode: "14.2.0" resource_class: macos.m1.medium.gen1 macos_test: &macos_test_executor macos: # See https://circleci.com/docs/xcode-policy along with the support matrix # at https://circleci.com/docs/using-macos#supported-xcode-versions. # We use the major.minor notation to bring in compatible patches. - xcode: 14.2 + xcode: "14.2.0" resource_class: macos.m1.medium.gen1 windows_build: &windows_build_executor machine: @@ -201,7 +201,7 @@ commands: find xtask/src -type f | while read name; do md5sum $name; done | sort -k 2 | md5sum > ~/.xtask_version # The closest common ancestor to the default branch, so that test jobs can take advantage of previous compiles git remote set-head origin -a - TARGET_BRANCH=$(git rev-parse --abbrev-ref origin/HEAD) + TARGET_BRANCH=$(git rev-parse --abbrev-ref origin/HEAD) echo "Target branch is ${TARGET_BRANCH}" COMMON_ANCESTOR_REF=$(git merge-base HEAD "${TARGET_BRANCH}") echo "Common ancestor is ${COMMON_ANCESTOR_REF}" @@ -715,8 +715,8 @@ jobs: # save containers for analysis mkdir built-containers docker save -o built-containers/router_${VERSION}-debug.tar ${ROUTER_TAG}:${VERSION}-debug - docker save -o built-containers/router_${VERSION}.tar ${ROUTER_TAG}:${VERSION} - + docker save -o built-containers/router_${VERSION}.tar ${ROUTER_TAG}:${VERSION} + - persist_to_workspace: root: . paths: @@ -971,8 +971,8 @@ workflows: # Disables all PR comments from this job do-pr-comments: false # Scan job will return 1 if findings violating the Wiz policy are found. - # Toggle off to prevent any CI failures OR - # contact Apollo's Security team to adjust what violates the + # Toggle off to prevent any CI failures OR + # contact Apollo's Security team to adjust what violates the # Wiz policy used in this scan. fail-on-findings: true # Configure scan job to use a policy specific to apollo-router. @@ -1069,7 +1069,7 @@ workflows: ignore: /.*/ tags: only: /v.*/ - + security-scans: when: not: << pipeline.parameters.nightly >> diff --git a/CHANGELOG.md b/CHANGELOG.md index ef575a10b5..056daa8a06 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,8 +4,37 @@ All notable changes to Router will be documented in this file. This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html). +# [1.45.1] - 2024-04-26 + +## 🐛 Fixes + +### Correct v1.44.0 regression in query plan cache ([PR #5028](https://github.com/apollographql/router/pull/5028)) + +Correct a critical regression that was introduced in [v1.44.0](https://github.com/apollographql/router/pull/4883), which could lead to execution of an incorrect query plan. + +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/5028 + +### Use entire schema when hashing an introspection query ([Issue #5006](https://github.com/apollographql/router/issues/5006)) + +Correct a _different_ hashing bug impacting introspection queries, which was also introduced in [v1.44.0](https://github.com/apollographql/router/pull/4883). 
This other hashing bug failed to account for introspection queries, resulting in introspection results being misaligned with the current schema. + +This release fixes the hashing mechanism by adding the schema string to the hashed data when an introspection field is encountered. As a result, the entire schema is taken into account and the correct introspection result is returned. + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5007 + +### Fix subgraph name mapping of Datadog exporter ([PR #5012](https://github.com/apollographql/router/pull/5012)) + +In router v1.45.0, subgraph name mapping didn't work correctly in the Datadog exporter. The exporter used the incorrect value `apollo.subgraph.name` for mapping attributes when it should have used the value `subgraph.name`. This issue has been fixed in this release. + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/5012 + + # [1.45.0] - 2024-04-22 +> **Warning** +> +> **This version has a critical bug impacting all users. See the _Fixes_ in [v1.45.1](https://github.com/apollographql/router/releases/tag/v1.45.1) for details. We highly recommend using v1.45.1 or v1.43.2 over v1.45.0.** + ## 🚀 Features ### Query validation process with Rust ([PR #4551](https://github.com/apollographql/router/pull/4551)) @@ -154,6 +183,11 @@ By [@bonnici](https://github.com/bonnici) in https://github.com/apollographql/ro # [1.44.0] - 2024-04-12 +> **Warning** +> +> **This version has a critical bug impacting all users. See the _Fixes_ in [v1.45.1](https://github.com/apollographql/router/releases/tag/v1.45.1) for details. We highly recommend using v1.45.1 or v1.43.2 over v1.44.0.** + + ## 🚀 Features ### Add details to `router service call failed` errors ([Issue #4899](https://github.com/apollographql/router/issues/4899)) diff --git a/Cargo.lock b/Cargo.lock index 156bf8d353..f7e2a3fbad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -250,7 +250,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.45.0" +version = "1.45.1" dependencies = [ "access-json", "anyhow", @@ -411,7 +411,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.45.0" +version = "1.45.1" dependencies = [ "apollo-parser", "apollo-router", @@ -427,7 +427,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.45.0" +version = "1.45.1" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index f33eea0f89..2c0e6c32b8 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.45.0" +version = "1.45.1" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index e7772cd9a1..95d195dfbd 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.45.0" +version = "1.45.1" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index 47ff9108a6..78785f27e4 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.45.0" +apollo-router = "1.45.1" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index bdace89219..f0cf6dc71b 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.0" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.45.1" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 8070eee9ee..a3eebd4ab9 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.45.0" +version = "1.45.1" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" diff --git a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap index 1961250dd2..d680c0b53f 100644 --- a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap +++ b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__non_overridden_field_yields_expected_query_plan.snap @@ -18,7 +18,7 @@ expression: query_plan "id": null, "inputRewrites": null, "outputRewrites": null, - "schemaAwareHash": "9358047754b11522aac502a3c6a668cd4286c07d489680834e63d6e033db4eb5", + "schemaAwareHash": "12dda6193654ae4fe6e38bc09d4f81cc73d0c9e098692096f72d2158eef4776f", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap index 5d4e064a6d..612b147fc2 100644 --- a/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap +++ 
b/apollo-router/src/plugins/progressive_override/snapshots/apollo_router__plugins__progressive_override__tests__overridden_field_yields_expected_query_plan.snap @@ -23,7 +23,7 @@ expression: query_plan "id": null, "inputRewrites": null, "outputRewrites": null, - "schemaAwareHash": "8f445761c0bcdda90b8da35ccd13fd98e474514f3efc071bd2c39495b5af94e5", + "schemaAwareHash": "00ad582ea45fc1bce436b36b21512f3d2c47b74fdbdc61e4b349289722c9ecf2", "authorization": { "is_authenticated": false, "scopes": [], @@ -61,7 +61,7 @@ expression: query_plan "id": null, "inputRewrites": null, "outputRewrites": null, - "schemaAwareHash": "9a1feab7ee8c57c8a4ab4db29712412a9cfe94009bfcb40dc0d22ea54c410865", + "schemaAwareHash": "a8ebdc2151a2e5207882e43c6906c0c64167fd9a8e0c7c4becc47736a5105096", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap index 0767d22f80..b32a905c0e 100644 --- a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap +++ b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan-2.snap @@ -68,7 +68,7 @@ expression: "serde_json::to_value(response).unwrap()" "id": null, "inputRewrites": null, "outputRewrites": null, - "schemaAwareHash": "34be619a78867ab9d0670048f4c93574e38cd9253e9cc032f567078355b25086", + "schemaAwareHash": "7245d488e97c3b2ac9f5fa4dd4660940b94ad81af070013305b2c0f76337b2f9", "authorization": { "is_authenticated": false, "scopes": [], @@ -107,7 +107,7 @@ expression: "serde_json::to_value(response).unwrap()" "id": null, "inputRewrites": null, "outputRewrites": null, - "schemaAwareHash": "f1582d942020b23347d84f6ae46c018492ae7c59c9b1472e0b442121ddf16368", + "schemaAwareHash": "6e0b4156706ea0cf924500cfdc99dd44b9f0ed07e2d3f888d4aff156e6a33238", "authorization": { "is_authenticated": false, "scopes": [], @@ -153,7 +153,7 @@ expression: "serde_json::to_value(response).unwrap()" "id": null, "inputRewrites": null, "outputRewrites": null, - "schemaAwareHash": "6fa5a74c5af2b18f343e9e69bbcbc9335e9faaa46c3d8964d199002dfeb0026f", + "schemaAwareHash": "ff649f3d70241d5a8cd5f5d03ff4c41ecff72b0e4129a480207b05ac92318042", "authorization": { "is_authenticated": false, "scopes": [], @@ -196,7 +196,7 @@ expression: "serde_json::to_value(response).unwrap()" "id": null, "inputRewrites": null, "outputRewrites": null, - "schemaAwareHash": "6fa5a74c5af2b18f343e9e69bbcbc9335e9faaa46c3d8964d199002dfeb0026f", + "schemaAwareHash": "bf9f3beda78a7a565e47c862157bad4ec871d724d752218da1168455dddca074", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap index 0767d22f80..b32a905c0e 100644 --- a/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap +++ b/apollo-router/src/plugins/snapshots/apollo_router__plugins__expose_query_plan__tests__it_expose_query_plan.snap @@ -68,7 +68,7 @@ expression: "serde_json::to_value(response).unwrap()" "id": null, "inputRewrites": null, "outputRewrites": null, - "schemaAwareHash": 
"34be619a78867ab9d0670048f4c93574e38cd9253e9cc032f567078355b25086", + "schemaAwareHash": "7245d488e97c3b2ac9f5fa4dd4660940b94ad81af070013305b2c0f76337b2f9", "authorization": { "is_authenticated": false, "scopes": [], @@ -107,7 +107,7 @@ expression: "serde_json::to_value(response).unwrap()" "id": null, "inputRewrites": null, "outputRewrites": null, - "schemaAwareHash": "f1582d942020b23347d84f6ae46c018492ae7c59c9b1472e0b442121ddf16368", + "schemaAwareHash": "6e0b4156706ea0cf924500cfdc99dd44b9f0ed07e2d3f888d4aff156e6a33238", "authorization": { "is_authenticated": false, "scopes": [], @@ -153,7 +153,7 @@ expression: "serde_json::to_value(response).unwrap()" "id": null, "inputRewrites": null, "outputRewrites": null, - "schemaAwareHash": "6fa5a74c5af2b18f343e9e69bbcbc9335e9faaa46c3d8964d199002dfeb0026f", + "schemaAwareHash": "ff649f3d70241d5a8cd5f5d03ff4c41ecff72b0e4129a480207b05ac92318042", "authorization": { "is_authenticated": false, "scopes": [], @@ -196,7 +196,7 @@ expression: "serde_json::to_value(response).unwrap()" "id": null, "inputRewrites": null, "outputRewrites": null, - "schemaAwareHash": "6fa5a74c5af2b18f343e9e69bbcbc9335e9faaa46c3d8964d199002dfeb0026f", + "schemaAwareHash": "bf9f3beda78a7a565e47c862157bad4ec871d724d752218da1168455dddca074", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/apollo-router/src/plugins/telemetry/apollo_exporter.rs b/apollo-router/src/plugins/telemetry/apollo_exporter.rs index 531cc94dcb..f6bfcc6fca 100644 --- a/apollo-router/src/plugins/telemetry/apollo_exporter.rs +++ b/apollo-router/src/plugins/telemetry/apollo_exporter.rs @@ -442,17 +442,3 @@ where None => serializer.serialize_none(), } } - -#[cfg(not(windows))] // git checkout converts \n to \r\n, making == below fail -#[test] -fn check_reports_proto_is_up_to_date() { - let proto_url = "https://usage-reporting.api.apollographql.com/proto/reports.proto"; - let response = reqwest::blocking::get(proto_url).unwrap(); - let content = response.text().unwrap(); - // Not using assert_eq! as printing the entire file would be too verbose - assert!( - content == include_str!("proto/reports.proto"), - "Protobuf file is out of date. Run this command to update it:\n\n \ - curl -f {proto_url} > apollo-router/src/plugins/telemetry/proto/reports.proto\n\n" - ); -} diff --git a/apollo-router/src/plugins/telemetry/tracing/datadog.rs b/apollo-router/src/plugins/telemetry/tracing/datadog.rs index 74497c5347..cd5f4351a2 100644 --- a/apollo-router/src/plugins/telemetry/tracing/datadog.rs +++ b/apollo-router/src/plugins/telemetry/tracing/datadog.rs @@ -29,7 +29,7 @@ lazy_static! 
{ map.insert("request", "http.route"); map.insert("supergraph", "graphql.operation.name"); map.insert("query_planning", "graphql.operation.name"); - map.insert("subgraph", "apollo.subgraph.name"); + map.insert("subgraph", "subgraph.name"); map.insert("subgraph_request", "graphql.operation.name"); map }; diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index e740f0aa4c..62e7197e66 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -379,7 +379,9 @@ impl BridgeQueryPlanner { .into_result() { Ok(mut plan) => { - plan.data.query_plan.hash_subqueries(&self.subgraph_schemas); + plan.data + .query_plan + .hash_subqueries(&self.subgraph_schemas, &self.schema.raw_sdl); plan.data .query_plan .extract_authorization_metadata(&self.schema.definitions, &key); @@ -583,8 +585,9 @@ impl Service for BridgeQueryPlanner { Some(d) => d, }; - let schema = &this.schema.api_schema().definitions; - match add_defer_labels(schema, &doc.ast) { + let api_schema = this.schema.api_schema(); + let api_schema_definitions = &api_schema.definitions; + match add_defer_labels(api_schema_definitions, &doc.ast) { Err(e) => { return Err(QueryPlannerError::SpecError(SpecError::TransformError( e.to_string(), } Ok(modified_query) => { let executable_document = modified_query - .to_executable_validate(schema) + .to_executable_validate(api_schema_definitions) + // Assume transformation creates a valid document: ignore conversion errors .map_err(|e| SpecError::ValidationError(e.into()))?; let hash = QueryHashVisitor::hash_query( - schema, + api_schema_definitions, + &api_schema.raw_sdl, &executable_document, operation_name.as_deref(), ) @@ -715,6 +720,7 @@ impl BridgeQueryPlanner { .map_err(|e| SpecError::ValidationError(e.into()))?; let hash = QueryHashVisitor::hash_query( &self.schema.definitions, + &self.schema.raw_sdl, &executable_document, key.operation_name.as_deref(), ) @@ -807,9 +813,13 @@ struct QueryPlan { } impl QueryPlan { - fn hash_subqueries(&mut self, schemas: &HashMap<String, Arc<Valid<apollo_compiler::Schema>>>) { + fn hash_subqueries( + &mut self, + schemas: &HashMap<String, Arc<Valid<apollo_compiler::Schema>>>, + supergraph_schema_hash: &str, + ) { if let Some(node) = self.node.as_mut() { - node.hash_subqueries(schemas); + node.hash_subqueries(schemas, supergraph_schema_hash); } } diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs index 96689c131a..4e8172bd76 100644 --- a/apollo-router/src/query_planner/caching_query_planner.rs +++ b/apollo-router/src/query_planner/caching_query_planner.rs @@ -141,6 +141,7 @@ where hash, metadata, plan_options, + .. 
}, _, )| WarmUpCachingQueryKey { @@ -206,6 +207,7 @@ where query: query.clone(), operation: operation.clone(), hash: doc.hash.clone(), + sdl: Arc::clone(&self.schema.raw_sdl), metadata, plan_options, }; @@ -386,6 +388,7 @@ where query: request.query.clone(), operation: request.operation_name.to_owned(), hash: doc.hash.clone(), + sdl: Arc::clone(&self.schema.raw_sdl), metadata, plan_options, }; @@ -522,6 +525,7 @@ fn stats_report_key_hash(stats_report_key: &str) -> String { #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct CachingQueryKey { pub(crate) query: String, + pub(crate) sdl: Arc<String>, pub(crate) operation: Option<String>, pub(crate) hash: Arc<QueryHash>, pub(crate) metadata: CacheKeyMetadata, @@ -541,6 +545,7 @@ impl std::fmt::Display for CachingQueryKey { hasher.update( &serde_json::to_vec(&self.plan_options).expect("serialization should not fail"), ); + hasher.update(&serde_json::to_vec(&self.sdl).expect("serialization should not fail")); let metadata = hex::encode(hasher.finalize()); write!( diff --git a/apollo-router/src/query_planner/fetch.rs b/apollo-router/src/query_planner/fetch.rs index 07207b23b1..58c22cae0d 100644 --- a/apollo-router/src/query_planner/fetch.rs +++ b/apollo-router/src/query_planner/fetch.rs @@ -497,12 +497,20 @@ impl FetchNode { &self.operation_kind } - pub(crate) fn hash_subquery(&mut self, schema: &Valid<apollo_compiler::Schema>) { + pub(crate) fn hash_subquery( + &mut self, + schema: &Valid<apollo_compiler::Schema>, + supergraph_schema_hash: &str, + ) { let doc = ExecutableDocument::parse(schema, &self.operation, "query.graphql") .expect("subgraph queries should be valid"); - if let Ok(hash) = QueryHashVisitor::hash_query(schema, &doc, self.operation_name.as_deref()) - { + if let Ok(hash) = QueryHashVisitor::hash_query( + schema, + supergraph_schema_hash, + &doc, + self.operation_name.as_deref(), + ) { self.schema_aware_hash = Arc::new(QueryHash(hash)); } } diff --git a/apollo-router/src/query_planner/plan.rs b/apollo-router/src/query_planner/plan.rs index 4d0f76e37d..c734541fb3 100644 --- a/apollo-router/src/query_planner/plan.rs +++ b/apollo-router/src/query_planner/plan.rs @@ -315,40 +315,43 @@ impl PlanNode { pub(crate) fn hash_subqueries( &mut self, schemas: &HashMap<String, Arc<Valid<apollo_compiler::Schema>>>, + supergraph_schema_hash: &str, ) { match self { PlanNode::Fetch(fetch_node) => { if let Some(schema) = schemas.get(&fetch_node.service_name) { - fetch_node.hash_subquery(schema); + fetch_node.hash_subquery(schema, supergraph_schema_hash); } } PlanNode::Sequence { nodes } => { for node in nodes { - node.hash_subqueries(schemas); + node.hash_subqueries(schemas, supergraph_schema_hash); } } PlanNode::Parallel { nodes } => { for node in nodes { - node.hash_subqueries(schemas); + node.hash_subqueries(schemas, supergraph_schema_hash); } } - PlanNode::Flatten(flatten) => flatten.node.hash_subqueries(schemas), + PlanNode::Flatten(flatten) => flatten + .node + .hash_subqueries(schemas, supergraph_schema_hash), PlanNode::Defer { primary, deferred } => { if let Some(node) = primary.node.as_mut() { - node.hash_subqueries(schemas); + node.hash_subqueries(schemas, supergraph_schema_hash); } for deferred_node in deferred { if let Some(node) = deferred_node.node.take() { let mut new_node = (*node).clone(); - new_node.hash_subqueries(schemas); + new_node.hash_subqueries(schemas, supergraph_schema_hash); deferred_node.node = Some(Arc::new(new_node)); } } } PlanNode::Subscription { primary: _, rest } => { if let Some(node) = rest.as_mut() { - node.hash_subqueries(schemas); + node.hash_subqueries(schemas, supergraph_schema_hash); } } PlanNode::Condition { @@ -357,10 
+360,10 @@ impl PlanNode { else_clause, } => { if let Some(node) = if_clause.as_mut() { - node.hash_subqueries(schemas); + node.hash_subqueries(schemas, supergraph_schema_hash); } if let Some(node) = else_clause.as_mut() { - node.hash_subqueries(schemas); + node.hash_subqueries(schemas, supergraph_schema_hash); } } } diff --git a/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap b/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap index 5e07a0cfd1..974ccc03c5 100644 --- a/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap +++ b/apollo-router/src/query_planner/snapshots/apollo_router__query_planner__bridge_query_planner__tests__plan_root.snap @@ -14,7 +14,7 @@ Fetch( input_rewrites: None, output_rewrites: None, schema_aware_hash: QueryHash( - "68a86d7602ea2876e77b84d7942f585ef6b6101887bb2979d1f0af3b28c9a0ed", + "a4ab3ffe0fd7863aea8cd1e85d019d2c64ec0351d62f9759bed3c9dc707ea315", ), authorization: CacheKeyMetadata { is_authenticated: false, diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs index 89a80aa77d..9f0170284c 100644 --- a/apollo-router/src/spec/query.rs +++ b/apollo-router/src/spec/query.rs @@ -280,8 +280,9 @@ impl Query { return Err(SpecError::ParseError(errors.into())); } }; - let schema = &schema.api_schema().definitions; - let executable_document = match ast.to_executable_validate(schema) { + let api_schema = schema.api_schema(); + let api_schema_definitions = &api_schema.definitions; + let executable_document = match ast.to_executable_validate(api_schema_definitions) { Ok(doc) => doc, Err(errors) => { return Err(SpecError::ValidationError(errors.into())); @@ -292,8 +293,14 @@ impl Query { let recursion_limit = parser.recursion_reached(); tracing::trace!(?recursion_limit, "recursion limit data"); - let hash = QueryHashVisitor::hash_query(schema, &executable_document, operation_name) - .map_err(|e| SpecError::QueryHashing(e.to_string()))?; + let hash = QueryHashVisitor::hash_query( + api_schema_definitions, + &api_schema.raw_sdl, + &executable_document, + operation_name, + ) + .map_err(|e| SpecError::QueryHashing(e.to_string()))?; + Ok(Arc::new(ParsedDocumentInner { ast, executable: Arc::new(executable_document), @@ -343,7 +350,7 @@ impl Query { .map(|operation| Operation::from_hir(operation, schema, &mut defer_stats, &fragments)) .collect::<Result<Vec<_>, SpecError>>()?; - let mut visitor = QueryHashVisitor::new(&schema.definitions, document); + let mut visitor = QueryHashVisitor::new(&schema.definitions, &schema.raw_sdl, document); traverse::document(&mut visitor, document, operation_name).map_err(|e| { SpecError::QueryHashing(format!("could not calculate the query hash: {e}")) })?; diff --git a/apollo-router/src/spec/query/change.rs b/apollo-router/src/spec/query/change.rs index bebb8efa72..eeb29efa37 100644 --- a/apollo-router/src/spec/query/change.rs +++ b/apollo-router/src/spec/query/change.rs @@ -33,11 +33,16 @@ pub(crate) const JOIN_TYPE_DIRECTIVE_NAME: &str = "join__type"; /// then the hash will stay the same pub(crate) struct QueryHashVisitor<'a> { schema: &'a schema::Schema, + // TODO: remove once introspection has been moved out of query planning + // For now, introspection is still handled by the planner, so when an + // introspection query is hashed, it should take the whole schema into account + schema_str: &'a str, hasher: Sha256, fragments: HashMap<&'a 
ast::Name, &'a Node<executable::Fragment>>, hashed_types: HashSet<String>, // name, field hashed_fields: HashSet<(String, String)>, + seen_introspection: bool, join_field_directive_name: Option<String>, join_type_directive_name: Option<String>, } @@ -45,14 +50,17 @@ impl<'a> QueryHashVisitor<'a> { pub(crate) fn new( schema: &'a schema::Schema, + schema_str: &'a str, executable: &'a executable::ExecutableDocument, ) -> Self { Self { schema, + schema_str, hasher: Sha256::new(), fragments: executable.fragments.iter().collect(), hashed_types: HashSet::new(), hashed_fields: HashSet::new(), + seen_introspection: false, // should we just return an error if we do not find those directives? join_field_directive_name: Schema::directive_name( schema, @@ -71,11 +79,13 @@ impl<'a> QueryHashVisitor<'a> { pub(crate) fn hash_query( schema: &'a schema::Schema, + schema_str: &'a str, executable: &'a executable::ExecutableDocument, operation_name: Option<&str>, ) -> Result<Vec<u8>, BoxError> { - let mut visitor = QueryHashVisitor::new(schema, executable); + let mut visitor = QueryHashVisitor::new(schema, schema_str, executable); traverse::document(&mut visitor, executable, operation_name)?; + executable.to_string().hash(&mut visitor); Ok(visitor.finish()) } @@ -357,6 +367,12 @@ impl<'a> Visitor for QueryHashVisitor<'a> { field_def: &ast::FieldDefinition, node: &executable::Field, ) -> Result<(), BoxError> { + if !self.seen_introspection && (field_def.name == "__schema" || field_def.name == "__type") + { + self.seen_introspection = true; + self.schema_str.hash(self); + } + self.hash_field( parent_type.to_string(), field_def.name.as_str().to_string(), @@ -411,9 +427,37 @@ mod tests { use super::QueryHashVisitor; use crate::spec::query::traverse; + #[derive(Debug)] + struct HashComparator { + from_visitor: String, + from_hash_query: String, + } + + impl From<(String, String)> for HashComparator { + fn from(value: (String, String)) -> Self { + Self { + from_visitor: value.0, + from_hash_query: value.1, + } + } + } + + // The non-equality check is not the same + // as one would expect from PartialEq. + // This is why HashComparator doesn't implement it. 
+ impl HashComparator { + fn equals(&self, other: &Self) -> bool { + self.from_visitor == other.from_visitor && self.from_hash_query == other.from_hash_query + } + fn doesnt_match(&self, other: &Self) -> bool { + // This is intentional: a mismatch requires BOTH hashes to differ + self.from_visitor != other.from_visitor && self.from_hash_query != other.from_hash_query + } + } + #[track_caller] - fn hash(schema: &str, query: &str) -> String { - let schema = Schema::parse(schema, "schema.graphql") + fn hash(schema_str: &str, query: &str) -> HashComparator { + let schema = Schema::parse(schema_str, "schema.graphql") .unwrap() .validate() .unwrap(); @@ -424,22 +468,26 @@ mod tests { .unwrap() .validate(&schema) .unwrap(); - let mut visitor = QueryHashVisitor::new(&schema, &exec); + let mut visitor = QueryHashVisitor::new(&schema, schema_str, &exec); traverse::document(&mut visitor, &exec, None).unwrap(); - hex::encode(visitor.finish()) + ( + hex::encode(visitor.finish()), + hex::encode(QueryHashVisitor::hash_query(&schema, schema_str, &exec, None).unwrap()), + ) + .into() } #[track_caller] - fn hash_subgraph_query(schema: &str, query: &str) -> String { - let schema = Valid::assume_valid(Schema::parse(schema, "schema.graphql").unwrap()); + fn hash_subgraph_query(schema_str: &str, query: &str) -> String { + let schema = Valid::assume_valid(Schema::parse(schema_str, "schema.graphql").unwrap()); let doc = Document::parse(query, "query.graphql").unwrap(); let exec = doc .to_executable(&schema) .unwrap() .validate(&schema) .unwrap(); - let mut visitor = QueryHashVisitor::new(&schema, &exec); + let mut visitor = QueryHashVisitor::new(&schema, schema_str, &exec); traverse::document(&mut visitor, &exec, None).unwrap(); hex::encode(visitor.finish()) @@ -479,20 +527,18 @@ mod tests { } "#; let query = "query { me { name } }"; - assert_eq!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).equals(&hash(schema2, query))); // id is nullable in 1, non-nullable in 2 let query = "query { me { id name } }"; - assert_ne!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); // simple normalization let query = "query { moi: me { name } }"; - assert_eq!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).equals(&hash(schema2, query))); - assert_ne!( - hash(schema1, "query { me { id name } }"), - hash(schema1, "query { me { name id } }") - ); + assert!(hash(schema1, "query { me { id name } }") .doesnt_match(&hash(schema1, "query { me { name id } }"))); } #[test] @@ -532,13 +578,13 @@ mod tests { } "#; let query = "query { me { name } }"; - assert_eq!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).equals(&hash(schema2, query))); let query = "query { me { id name } }"; - assert_ne!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); let query = "query { customer { id } }"; - assert_ne!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); } #[test] @@ -586,13 +632,13 @@ mod tests { "#; let query = "query { me { id name } }"; - assert_eq!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).equals(&hash(schema2, query))); let query = "query { customer { id } }"; - assert_ne!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); let query = "query { customer { ... 
on User { name } } }"; - assert_ne!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); } #[test] @@ -614,19 +660,19 @@ mod tests { "#; let query = "query { a(i: 0) }"; - assert_ne!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); let query = "query { b }"; - assert_ne!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); let query = "query { b(i: 0)}"; - assert_ne!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); let query = "query { c(j: 0)}"; - assert_ne!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); let query = "query { c(i:0, j: 0)}"; - assert_ne!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); } #[test] @@ -817,10 +863,10 @@ mod tests { } "#; let query = "query { me { name } }"; - assert_ne!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); let query = "query { itf { name } }"; - assert_ne!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); } #[test] @@ -945,9 +991,95 @@ mod tests { } "#; let query = "query { me { username } }"; - assert_ne!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); let query = "query { me { a } }"; - assert_ne!(hash(schema1, query), hash(schema2, query)); + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + } + + #[test] + fn introspection() { + let schema1: &str = r#" + schema { + query: Query + } + + type Query { + me: User + customer: User + } + + type User { + id: ID + name: String + } + "#; + + let schema2: &str = r#" + schema { + query: Query + } + + type Query { + me: NotUser + } + + + type NotUser { + id: ID! + name: String + } + "#; + + let query = "{ __schema { types { name } } }"; + + assert!(hash(schema1, query).doesnt_match(&hash(schema2, query))); + } + + #[test] + fn fields_with_different_arguments_have_different_hashes() { + let schema: &str = r#" + schema { + query: Query + } + + type Query { + test(arg: Int): String + } + "#; + + let query_one = "query { a: test(arg: 1) b: test(arg: 2) }"; + let query_two = "query { a: test(arg: 1) b: test(arg: 3) }"; + + // This assertion tests an internal hash function that isn't directly + // used for the query hash, and we'll need to make it pass to rely + // solely on the internal function again. + // + // assert!(hash(schema, query_one).doesnt_match(&hash(schema, + // query_two))); + assert!(hash(schema, query_one).from_hash_query != hash(schema, query_two).from_hash_query); + } + + #[test] + fn fields_with_different_aliases_have_different_hashes() { + let schema: &str = r#" + schema { + query: Query + } + + type Query { + test(arg: Int): String + } + "#; + + let query_one = "query { a: test }"; + let query_two = "query { b: test }"; + + // This assertion tests an internal hash function that isn't directly + // used for the query hash, and we'll need to make it pass to rely + // solely on the internal function again. 
+ // + // assert!(hash(schema, query_one).doesnt_match(&hash(schema, query_two))); + assert!(hash(schema, query_one).from_hash_query != hash(schema, query_two).from_hash_query); } } diff --git a/apollo-router/tests/integration/redis.rs b/apollo-router/tests/integration/redis.rs index 5a340fea03..0eaf7ce948 100644 --- a/apollo-router/tests/integration/redis.rs +++ b/apollo-router/tests/integration/redis.rs @@ -28,12 +28,12 @@ mod test { // 2. run `docker compose up -d` and connect to the redis container by running `docker exec -ti /bin/bash`. // 3. Run the `redis-cli` command from the shell and start the redis `monitor` command. // 4. Run this test and yank the updated cache key from the redis logs. - let known_cache_key = "plan:v2.7.2:af1ee357bc75cfbbcc6adda41089a56e7d1d52f6d44c049739dde2c259314f58:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:2bf7810d3a47b31d8a77ebb09cdc784a3f77306827dc55b06770030a858167c7"; + let known_cache_key = "plan:v2.7.2:121b9859eba2d8fa6dde0a54b6e3781274cf69f7ffb0af912e92c01c6bfff6ca:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:5c7a72fa35639949328548d77b56dba2e77d0dfa90c19b69978da119e996bb92"; - let config = RedisConfig::from_url("redis://127.0.0.1:6379")?; + let config = RedisConfig::from_url("redis://127.0.0.1:6379").unwrap(); let client = RedisClient::new(config, None, None, None); let connection_task = client.connect(); - client.wait_for_connect().await?; + client.wait_for_connect().await.unwrap(); client.del::(known_cache_key).await.unwrap(); @@ -53,17 +53,25 @@ mod test { } } } - }))? + })) + .unwrap() .schema(include_str!("../fixtures/supergraph.graphql")) .build_supergraph() - .await?; + .await + .unwrap(); let request = supergraph::Request::fake_builder() .query(r#"{ topProducts { name name2:name } }"#) .method(Method::POST) - .build()?; + .build() + .unwrap(); - let _ = supergraph.oneshot(request).await?.next_response().await; + let _ = supergraph + .oneshot(request) + .await + .unwrap() + .next_response() + .await; let s: String = match client.get(known_cache_key).await { Ok(s) => s, @@ -116,16 +124,24 @@ mod test { } } } - }))? + })) + .unwrap() .schema(include_str!("../fixtures/supergraph.graphql")) .build_supergraph() - .await?; + .await + .unwrap(); let request = supergraph::Request::fake_builder() .query(r#"{ topProducts { name name2:name } }"#) .method(Method::POST) - .build()?; - let _ = supergraph.oneshot(request).await?.next_response().await; + .build() + .unwrap(); + let _ = supergraph + .oneshot(request) + .await + .unwrap() + .next_response() + .await; let new_exp: i64 = client .custom_raw(cmd!("EXPIRETIME"), vec![known_cache_key.to_string()]) .await @@ -135,7 +151,7 @@ mod test { assert!(exp < new_exp); - client.quit().await?; + client.quit().await.unwrap(); // calling quit ends the connection and event listener tasks let _ = connection_task.await; Ok(()) @@ -149,10 +165,10 @@ mod test { #[tokio::test(flavor = "multi_thread")] async fn apq() -> Result<(), BoxError> { - let config = RedisConfig::from_url("redis://127.0.0.1:6379")?; + let config = RedisConfig::from_url("redis://127.0.0.1:6379").unwrap(); let client = RedisClient::new(config, None, None, None); let connection_task = client.connect(); - client.wait_for_connect().await?; + client.wait_for_connect().await.unwrap(); let config = json!({ "apq": { @@ -172,10 +188,12 @@ mod test { let router = apollo_router::TestHarness::builder() .with_subgraph_network_requests() - .configuration_json(config.clone())? 
+ .configuration_json(config.clone()) + .unwrap() .schema(include_str!("../fixtures/supergraph.graphql")) .build_router() - .await?; + .await + .unwrap(); let query_hash = "4c45433039407593557f8a982dafd316a66ec03f0e1ed5fa1b7ef8060d76e8ec"; @@ -194,19 +212,22 @@ mod test { let request: router::Request = supergraph::Request::fake_builder() .extension("persistedQuery", persisted.clone()) .method(Method::POST) - .build()? + .build() + .unwrap() .try_into() .unwrap(); let res = router .clone() .oneshot(request) - .await? + .await + .unwrap() .into_graphql_response_stream() .await .next() .await - .unwrap()?; + .unwrap() + .unwrap(); assert_eq!( res.errors.first().unwrap().message, "PersistedQueryNotFound" @@ -220,19 +241,22 @@ mod test { .query(r#"{ topProducts { name name2:name } }"#) .extension("persistedQuery", persisted.clone()) .method(Method::POST) - .build()? + .build() + .unwrap() .try_into() .unwrap(); let res = router .clone() .oneshot(request) - .await? + .await + .unwrap() .into_graphql_response_stream() .await .next() .await - .unwrap()?; + .unwrap() + .unwrap(); assert!(res.data.is_some()); assert!(res.errors.is_empty()); @@ -243,32 +267,37 @@ mod test { // it should have the same connection to Redis, but the in memory cache has been reset let router = apollo_router::TestHarness::builder() .with_subgraph_network_requests() - .configuration_json(config.clone())? + .configuration_json(config.clone()) + .unwrap() .schema(include_str!("../fixtures/supergraph.graphql")) .build_router() - .await?; + .await + .unwrap(); // a request with only the hash should succeed because it is stored in Redis let request: router::Request = supergraph::Request::fake_builder() .extension("persistedQuery", persisted.clone()) .method(Method::POST) - .build()? + .build() + .unwrap() .try_into() .unwrap(); let res = router .clone() .oneshot(request) - .await? + .await + .unwrap() .into_graphql_response_stream() .await .next() .await - .unwrap()?; + .unwrap() + .unwrap(); assert!(res.data.is_some()); assert!(res.errors.is_empty()); - client.quit().await?; + client.quit().await.unwrap(); // calling quit ends the connection and event listener tasks let _ = connection_task.await; Ok(()) @@ -276,10 +305,10 @@ mod test { #[tokio::test(flavor = "multi_thread")] async fn entity_cache() -> Result<(), BoxError> { - let config = RedisConfig::from_url("redis://127.0.0.1:6379")?; + let config = RedisConfig::from_url("redis://127.0.0.1:6379").unwrap(); let client = RedisClient::new(config, None, None, None); let connection_task = client.connect(); - client.wait_for_connect().await?; + client.wait_for_connect().await.unwrap(); let mut subgraphs = MockedSubgraphs::default(); subgraphs.insert( @@ -357,7 +386,8 @@ mod test { "include_subgraph_errors": { "all": true } - }))? + })) + .unwrap() .extra_plugin(subgraphs) .schema(include_str!("../fixtures/supergraph-auth.graphql")) .build_supergraph() @@ -367,24 +397,26 @@ mod test { let request = supergraph::Request::fake_builder() .query(r#"{ topProducts { name reviews { body } } }"#) .method(Method::POST) - .build()?; + .build() + .unwrap(); let response = supergraph .oneshot(request) - .await? 
+ .await + .unwrap() .next_response() .await .unwrap(); insta::assert_json_snapshot!(response); let s:String = client - .get("subgraph:products:Query:07bd08ba4eb8b85451edd3b3aae3c3ad3dc0892d86deedde6e6d53f6415f807f:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") + .get("subgraph:products:Query:0df945dc1bc08f7fc02e8905b4c72aa9112f29bb7a214e4a38d199f0aa635b48:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") .await .unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); insta::assert_json_snapshot!(v.as_object().unwrap().get("data").unwrap()); - let s: String = client.get("subgraph:reviews:Product:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:826d5cf03645266e30655c7475530e2d40e0d5978595b0ab16318b1ce87c0fe1:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c").await.unwrap(); + let s: String = client.get("subgraph:reviews:Product:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:1de543dab57fde0f00247922ccc4f76d4c916ae26a89dd83cd1a62300d0cda20:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c").await.unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); insta::assert_json_snapshot!(v.as_object().unwrap().get("data").unwrap()); @@ -459,7 +491,8 @@ mod test { "include_subgraph_errors": { "all": true } - }))? + })) + .unwrap() .extra_plugin(subgraphs) .schema(include_str!("../fixtures/supergraph-auth.graphql")) .build_supergraph() @@ -469,24 +502,26 @@ mod test { let request = supergraph::Request::fake_builder() .query(r#"{ topProducts(first: 2) { name reviews { body } } }"#) .method(Method::POST) - .build()?; + .build() + .unwrap(); let response = supergraph .oneshot(request) - .await? + .await + .unwrap() .next_response() .await .unwrap(); insta::assert_json_snapshot!(response); let s:String = client - .get("subgraph:reviews:Product:d9a4cd73308dd13ca136390c10340823f94c335b9da198d2339c886c738abf0d:826d5cf03645266e30655c7475530e2d40e0d5978595b0ab16318b1ce87c0fe1:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") + .get("subgraph:reviews:Product:d9a4cd73308dd13ca136390c10340823f94c335b9da198d2339c886c738abf0d:1de543dab57fde0f00247922ccc4f76d4c916ae26a89dd83cd1a62300d0cda20:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") .await .unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); insta::assert_json_snapshot!(v.as_object().unwrap().get("data").unwrap()); - client.quit().await?; + client.quit().await.unwrap(); // calling quit ends the connection and event listener tasks let _ = connection_task.await; Ok(()) @@ -494,10 +529,10 @@ mod test { #[tokio::test(flavor = "multi_thread")] async fn entity_cache_authorization() -> Result<(), BoxError> { - let config = RedisConfig::from_url("redis://127.0.0.1:6379")?; + let config = RedisConfig::from_url("redis://127.0.0.1:6379").unwrap(); let client = RedisClient::new(config, None, None, None); let connection_task = client.connect(); - client.wait_for_connect().await?; + client.wait_for_connect().await.unwrap(); let mut subgraphs = MockedSubgraphs::default(); subgraphs.insert( @@ -659,7 +694,8 @@ mod test { "include_subgraph_errors": { "all": true } - }))? + })) + .unwrap() .extra_plugin(subgraphs) .schema(include_str!("../fixtures/supergraph-auth.graphql")) .build_supergraph() @@ -679,19 +715,21 @@ mod test { ) .context(context) .method(Method::POST) - .build()?; + .build() + .unwrap(); let response = supergraph .clone() .oneshot(request) - .await? 
+ .await + .unwrap() .next_response() .await .unwrap(); insta::assert_json_snapshot!(response); let s:String = client - .get("subgraph:products:Query:07bd08ba4eb8b85451edd3b3aae3c3ad3dc0892d86deedde6e6d53f6415f807f:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") + .get("subgraph:products:Query:0df945dc1bc08f7fc02e8905b4c72aa9112f29bb7a214e4a38d199f0aa635b48:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") .await .unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); @@ -712,7 +750,7 @@ mod test { ); let s: String = client - .get("subgraph:reviews:Product:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:826d5cf03645266e30655c7475530e2d40e0d5978595b0ab16318b1ce87c0fe1:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") + .get("subgraph:reviews:Product:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:1de543dab57fde0f00247922ccc4f76d4c916ae26a89dd83cd1a62300d0cda20:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") .await .unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); @@ -744,19 +782,21 @@ mod test { ) .context(context) .method(Method::POST) - .build()?; + .build() + .unwrap(); let response = supergraph .clone() .oneshot(request) - .await? + .await + .unwrap() .next_response() .await .unwrap(); insta::assert_json_snapshot!(response); let s:String = client - .get("subgraph:reviews:Product:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:c75297b98da101021e30020db99a3a11c2f9ac2008de94ce410c47940162e304:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") + .get("subgraph:reviews:Product:4911f7a9dbad8a47b8900d65547503a2f3c0359f65c0bc5652ad9b9843281f66:3b6ef3c8fd34c469d59f513942c5f4c8f91135e828712de2024e2cd4613c50ae:d9d84a3c7ffc27b0190a671212f3740e5b8478e84e23825830e97822e25cf05c") .await .unwrap(); let v: Value = serde_json::from_str(&s).unwrap(); @@ -789,18 +829,20 @@ mod test { ) .context(context) .method(Method::POST) - .build()?; + .build() + .unwrap(); let response = supergraph .clone() .oneshot(request) - .await? 
+ .await + .unwrap() .next_response() .await .unwrap(); insta::assert_json_snapshot!(response); - client.quit().await?; + client.quit().await.unwrap(); // calling quit ends the connection and event listener tasks let _ = connection_task.await; Ok(()) diff --git a/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__test__query_planner.snap b/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__test__query_planner.snap index 1fad999d1a..4714fe2240 100644 --- a/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__test__query_planner.snap +++ b/apollo-router/tests/integration/snapshots/integration_tests__integration__redis__test__query_planner.snap @@ -12,7 +12,7 @@ expression: query_plan "id": null, "inputRewrites": null, "outputRewrites": null, - "schemaAwareHash": "af1ee357bc75cfbbcc6adda41089a56e7d1d52f6d44c049739dde2c259314f58", + "schemaAwareHash": "121b9859eba2d8fa6dde0a54b6e3781274cf69f7ffb0af912e92c01c6bfff6ca", "authorization": { "is_authenticated": false, "scopes": [], diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index f8558665b5..df99953681 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.45.0 + image: ghcr.io/apollographql/router:v1.45.1 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 6d224643a0..4d3f756a81 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.45.0 + image: ghcr.io/apollographql/router:v1.45.1 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index 8dbfeb308f..f4887b62e6 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.45.0 + image: ghcr.io/apollographql/router:v1.45.1 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index f5080d2a41..a2882ca4c3 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.45.0 +version: 1.45.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "v1.45.0" +appVersion: "v1.45.1" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index bb2f7a13b7..287502706b 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.45.0](https://img.shields.io/badge/Version-1.45.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.0](https://img.shields.io/badge/AppVersion-v1.45.0-informational?style=flat-square) +![Version: 1.45.1](https://img.shields.io/badge/Version-1.45.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.45.1](https://img.shields.io/badge/AppVersion-v1.45.1-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.1 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.0 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.45.1 --values my-values.yaml ``` _See [configuration](#configuration) below._ diff --git a/licenses.html b/licenses.html index 730fe407df..1e2c8a0a04 100644 --- a/licenses.html +++ b/licenses.html @@ -44,7 +44,7 @@

Third Party Licenses

Overview of licenses:

-  • Apache License 2.0 (508)
+  • Apache License 2.0 (481)
   • MIT License (154)
   • BSD 3-Clause "New" or "Revised" License (12)
   • ISC License (11)

@@ -4652,204 +4652,6 @@

    Used by:

    of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS

-   Apache License 2.0
-
-   Used by:
-
-   [full Apache License 2.0 text omitted]
    -          as modifying the License.
    -
    -      You may add Your own copyright statement to Your modifications and
    -      may provide additional or different license terms and conditions
    -      for use, reproduction, or distribution of Your modifications, or
    -      for any such Derivative Works as a whole, provided Your use,
    -      reproduction, and distribution of the Work otherwise complies with
    -      the conditions stated in this License.
    -
    -   5. Submission of Contributions. Unless You explicitly state otherwise,
    -      any Contribution intentionally submitted for inclusion in the Work
    -      by You to the Licensor shall be under the terms and conditions of
    -      this License, without any additional terms or conditions.
    -      Notwithstanding the above, nothing herein shall supersede or modify
    -      the terms of any separate license agreement you may have executed
    -      with Licensor regarding such Contributions.
    -
    -   6. Trademarks. This License does not grant permission to use the trade
    -      names, trademarks, service marks, or product names of the Licensor,
    -      except as required for reasonable and customary use in describing the
    -      origin of the Work and reproducing the content of the NOTICE file.
    -
    -   7. Disclaimer of Warranty. Unless required by applicable law or
    -      agreed to in writing, Licensor provides the Work (and each
    -      Contributor provides its Contributions) on an "AS IS" BASIS,
    -      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    -      implied, including, without limitation, any warranties or conditions
    -      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
    -      PARTICULAR PURPOSE. You are solely responsible for determining the
    -      appropriateness of using or redistributing the Work and assume any
    -      risks associated with Your exercise of permissions under this License.
    -
    -   8. Limitation of Liability. In no event and under no legal theory,
    -      whether in tort (including negligence), contract, or otherwise,
    -      unless required by applicable law (such as deliberate and grossly
    -      negligent acts) or agreed to in writing, shall any Contributor be
    -      liable to You for damages, including any direct, indirect, special,
    -      incidental, or consequential damages of any character arising as a
    -      result of this License or out of the use or inability to use the
    -      Work (including but not limited to damages for loss of goodwill,
    -      work stoppage, computer failure or malfunction, or any and all
    -      other commercial damages or losses), even if such Contributor
    -      has been advised of the possibility of such damages.
    -
    -   9. Accepting Warranty or Additional Liability. While redistributing
    -      the Work or Derivative Works thereof, You may choose to offer,
    -      and charge a fee for, acceptance of support, warranty, indemnity,
    -      or other liability obligations and/or rights consistent with this
    -      License. However, in accepting such obligations, You may act only
    -      on Your own behalf and on Your sole responsibility, not on behalf
    -      of any other Contributor, and only if You agree to indemnify,
    -      defend, and hold each Contributor harmless for any liability
    -      incurred by, or claims asserted against, such Contributor by reason
    -      of your accepting any such warranty or additional liability.
    -
    -   END OF TERMS AND CONDITIONS
    -
    -   Copyright 2019 Yoshua Wuyts
    -
    -   Licensed under the Apache License, Version 2.0 (the "License");
    -   you may not use this file except in compliance with the License.
    -   You may obtain a copy of the License at
    -
    -       http://www.apache.org/licenses/LICENSE-2.0
    -
    -   Unless required by applicable law or agreed to in writing, software
    -   distributed under the License is distributed on an "AS IS" BASIS,
    -   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -   See the License for the specific language governing permissions and
    -   limitations under the License.
     
    @@ -8411,26 +8213,13 @@

    Used by:

        arbitrary
        arc-swap
        async-channel
    -   async-channel
        async-compression
    -   async-executor
    -   async-global-executor
    -   async-io
    -   async-io
    -   async-lock
    -   async-lock
    -   async-process
    -   async-signal
    -   async-std
    -   async-task
        atomic-waker
        autocfg
        backtrace
        base64
        base64
        bitflags
        bitflags
    -   blocking
        bstr
        bumpalo
        bytes-utils
    @@ -8453,9 +8242,6 @@

    Used by:

        envmnt
        equivalent
        event-listener
    -   event-listener
    -   event-listener
    -   event-listener-strategy
        fastrand
        fastrand
        filetime
    @@ -8466,7 +8252,6 @@

    Used by:

        fraction
        fsio
        futures-lite
    -   futures-lite
        gimli
        git2
        group
    @@ -8485,7 +8270,6 @@

    Used by:

        indexmap
        indexmap
        inventory
    -   io-lifetimes
        ipconfig
        itertools
        itertools
    @@ -8499,7 +8283,6 @@

    Used by:

        libz-ng-sys
        libz-sys
        linux-raw-sys
    -   linux-raw-sys
        lock_api
        log
        maplit
    @@ -8531,10 +8314,8 @@

    Used by:

        pest_generator
        pest_meta
        petgraph
    -   piper
        pkg-config
        platforms
    -   polling
        proc-macro2
        prost
        prost
    @@ -8556,7 +8337,6 @@

    Used by:

        rustc_version
        rustc_version
        rustix
    -   rustix
        rustls
        rustls-native-certs
        rustls-pemfile
    @@ -8574,7 +8354,6 @@

    Used by:

        similar
        smallvec
        socket2
    -   socket2
        stable_deref_trait
        syn
        syn
    @@ -8599,7 +8378,6 @@

    Used by:

        unicode-xid
        url
        uuid
    -   value-bag
        version_check
        waker-fn
        wasi
    @@ -11105,7 +10883,6 @@

    Used by:

                                  Apache License
    @@ -12417,7 +12194,6 @@

    Used by:

        deno-proc-macro-rules
        deno-proc-macro-rules-macros
        dunce
    -   gloo-timers
        graphql-introspection-query
        graphql_client
        graphql_client_codegen
diff --git a/scripts/install.sh b/scripts/install.sh
index 8f5c6c2fac..e938a1fc4e 100755
--- a/scripts/install.sh
+++ b/scripts/install.sh
@@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa
 
 # Router version defined in apollo-router's Cargo.toml
 # Note: Change this line manually during the release steps.
-PACKAGE_VERSION="v1.45.0"
+PACKAGE_VERSION="v1.45.1"
 
 download_binary() {
     downloader --check