From 95f20ceca2b193ff95598591baf84c76a9ce6c55 Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Fri, 1 Sep 2023 10:41:21 +0200 Subject: [PATCH 01/50] Deal with interfaces on fragment spreads when no __typename is queried Fix #2587 Operations would over rely on the presence of __typename to resolve selection sets on interface implementers. This changeset checks for the parent type in an InlineFragment, so we don't drop relevant selection set when applicable. --- .../src/services/supergraph_service.rs | 131 ++++++++++++++++++ apollo-router/src/spec/query.rs | 23 +-- apollo-router/src/spec/query/tests.rs | 42 +++++- 3 files changed, 182 insertions(+), 14 deletions(-) diff --git a/apollo-router/src/services/supergraph_service.rs b/apollo-router/src/services/supergraph_service.rs index 7471a059b9..d7bad236b5 100644 --- a/apollo-router/src/services/supergraph_service.rs +++ b/apollo-router/src/services/supergraph_service.rs @@ -2924,4 +2924,135 @@ mod tests { insta::assert_json_snapshot!(stream.next_response().await.unwrap()); } + + #[tokio::test] + async fn no_typename_on_interface() { + let subgraphs = MockedSubgraphs([ + ("animal", MockSubgraph::builder().with_json( + serde_json::json!{{"query":"query dog__animal__0{dog{id name}}", "operationName": "dog__animal__0"}}, + serde_json::json!{{"data":{"dog":{"id":"4321","name":"Spot"}}}} + ).with_json( + serde_json::json!{{"query":"query dog__animal__0{dog{__typename id name}}", "operationName": "dog__animal__0"}}, + serde_json::json!{{"data":{"dog":{"__typename":"Dog","id":"4321","name":"Spot"}}}} + ).build()), + ].into_iter().collect()); + + let service = TestHarness::builder() + .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } })) + .unwrap() + .schema( + r#"schema + @core(feature: "https://specs.apollo.dev/core/v0.2"), + @core(feature: "https://specs.apollo.dev/join/v0.1", for: EXECUTION) + { + query: Query + } + directive @core(as: String, feature: String!, for: core__Purpose) repeatable on SCHEMA + directive @join__field(graph: join__Graph, provides: join__FieldSet, requires: join__FieldSet) on FIELD_DEFINITION + directive @join__graph(name: String!, url: String!) on ENUM_VALUE + directive @join__owner(graph: join__Graph!) on INTERFACE | OBJECT + directive @join__type(graph: join__Graph!, key: join__FieldSet) repeatable on INTERFACE | OBJECT + + interface Animal { + id: String! + } + + type Dog implements Animal { + id: String! + name: String! + } + + type Query { + animal: Animal! @join__field(graph: ANIMAL) + dog: Dog! @join__field(graph: ANIMAL) + } + + enum core__Purpose { + """ + `EXECUTION` features provide metadata necessary to for operation execution. + """ + EXECUTION + + """ + `SECURITY` features provide metadata necessary to securely resolve fields. 
+ """ + SECURITY + } + + scalar join__FieldSet + + enum join__Graph { + ANIMAL @join__graph(name: "animal" url: "http://localhost:8080/query") + } + "#, + ) + .extra_plugin(subgraphs) + .build_supergraph() + .await + .unwrap(); + + let request = supergraph::Request::fake_builder() + .context(defer_context()) + .query( + "query dog { + dog { + ...on Animal { + id + ...on Dog { + name + } + } + } + }", + ) + .build() + .unwrap(); + + let mut stream = service.clone().oneshot(request).await.unwrap(); + + let no_typename = stream.next_response().await.unwrap(); + + let request = supergraph::Request::fake_builder() + .context(defer_context()) + .query( + "query dog { + dog { + ...on Animal { + id + __typename + ...on Dog { + name + } + } + } + }", + ) + .build() + .unwrap(); + + let mut stream = service.oneshot(request).await.unwrap(); + + let with_typename = stream.next_response().await.unwrap(); + assert_eq!( + with_typename + .data + .clone() + .unwrap() + .get("dog") + .unwrap() + .get("name") + .unwrap(), + no_typename + .data + .clone() + .unwrap() + .get("dog") + .unwrap() + .get("name") + .unwrap(), + "{:?}\n{:?}", + with_typename, + no_typename + ); + } } diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs index 10b9775310..8d44c55175 100644 --- a/apollo-router/src/spec/query.rs +++ b/apollo-router/src/spec/query.rs @@ -680,21 +680,18 @@ impl Query { let is_apply = if let Some(input_type) = input.get(TYPENAME).and_then(|val| val.as_str()) { - // check if the fragment matches the input type directly, and if not, check if the + // Only check if the fragment matches the input type directly, and if not, check if the // input type is a subtype of the fragment's type condition (interface, union) input_type == type_condition.as_str() || parameters.schema.is_subtype(type_condition, input_type) } else { - // known_type = true means that from the query's shape, we know - // we should get the right type here. But in the case we get a - // __typename field and it does not match, we should not apply - // that fragment - // If the type condition is an interface and the current known type implements it known_type - .as_ref() - .map(|k| parameters.schema.is_subtype(type_condition, k)) + .as_ref() + // We have no typename, we apply the selection set if the known_type implements the type_condition + .map(|k| is_subtype_or_same(parameters, type_condition, k)) .unwrap_or_default() - || known_type.as_deref() == Some(type_condition.as_str()) + // Or if the known_type implements the parent's type_condition because we're in an inline fragment. + || is_subtype_or_same(parameters, &parent_type.name(), type_condition) }; if is_apply { @@ -1072,6 +1069,14 @@ impl Query { } } +fn is_subtype_or_same( + parameters: &FormatParameters<'_>, + parent: &String, + maybe_child: &String, +) -> bool { + parent == maybe_child || parameters.schema.is_subtype(parent, maybe_child) +} + /// Intermediate structure for arguments passed through the entire formatting struct FormatParameters<'a> { variables: &'a Object, diff --git a/apollo-router/src/spec/query/tests.rs b/apollo-router/src/spec/query/tests.rs index 6da93dc0d4..32c08139ab 100644 --- a/apollo-router/src/spec/query/tests.rs +++ b/apollo-router/src/spec/query/tests.rs @@ -18,6 +18,26 @@ macro_rules! assert_eq_and_ordered { }; } +macro_rules! assert_eq_and_ordered_json { + ($a:expr, $b:expr $(,)?) 
=> { + assert_eq!( + $a, + $b, + "assertion failed: objects are not the same:\ + \n left: `{}`\n right: `{}`", + serde_json::to_string(&$a).unwrap(), + serde_json::to_string(&$b).unwrap() + ); + assert!( + $a.eq_and_ordered(&$b), + "assertion failed: objects are not ordered the same:\ + \n left: `{}`\n right: `{}`", + serde_json::to_string(&$a).unwrap(), + serde_json::to_string(&$b).unwrap(), + ); + }; +} + #[derive(Default)] struct FormatTest { schema: Option<&'static str>, @@ -122,15 +142,21 @@ impl FormatTest { ); if let Some(e) = self.expected { - assert_eq_and_ordered!(response.data.as_ref().unwrap(), &e); + assert_eq_and_ordered_json!( + serde_json_bytes::to_value(response.data.as_ref()).unwrap(), + e + ); } if let Some(e) = self.expected_errors { - assert_eq_and_ordered!(serde_json_bytes::to_value(&response.errors).unwrap(), e); + assert_eq_and_ordered_json!(serde_json_bytes::to_value(&response.errors).unwrap(), e); } if let Some(e) = self.expected_extensions { - assert_eq_and_ordered!(serde_json_bytes::to_value(&response.extensions).unwrap(), e); + assert_eq_and_ordered_json!( + serde_json_bytes::to_value(&response.extensions).unwrap(), + e + ); } } } @@ -496,9 +522,15 @@ fn reformat_response_data_best_effort() { "baz": "2", }, "array": [ - {}, + { + "bar":null, + "baz":"3" + }, null, - {}, + { + "bar":"5", + "baz":null + } ], "other": null, }, From 09b16a9f63751ac1d03b81426e75d1881550b286 Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Fri, 1 Sep 2023 10:48:11 +0200 Subject: [PATCH 02/50] changeset --- .changesets/fix_igni_typename_fragment_interfaces.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changesets/fix_igni_typename_fragment_interfaces.md diff --git a/.changesets/fix_igni_typename_fragment_interfaces.md b/.changesets/fix_igni_typename_fragment_interfaces.md new file mode 100644 index 0000000000..634bb740b0 --- /dev/null +++ b/.changesets/fix_igni_typename_fragment_interfaces.md @@ -0,0 +1,5 @@ +### Deal with interfaces on fragment spreads when no __typename is queried ([Issue #2587](https://github.com/apollographql/router/issues/2587)) + +Operations would over rely on the presence of __typename to resolve selection sets on interface implementers. This changeset checks for the parent type in an InlineFragment, so we don't drop relevant selection set when applicable. 
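For illustration, this is the shape of operation the fix targets, taken from the test added in this patch: `Dog` implements the `Animal` interface, and the concrete type's field is reached through nested inline fragments without requesting `__typename`:

```graphql
query dog {
  dog {
    ... on Animal {
      id
      ... on Dog {
        name
      }
    }
  }
}
```

Before this change, formatting the response relied on a `__typename` value in the data to match `... on Dog`, so `name` could be dropped when `__typename` was not queried.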
+ +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3718 From 9e73e8124627612e3cc87b24be7a274c9b19432b Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Fri, 1 Sep 2023 14:31:49 +0200 Subject: [PATCH 03/50] move the logic to selection set generation --- .../src/services/supergraph_service.rs | 47 ++++++++++++++++++- apollo-router/src/spec/query.rs | 8 ++-- apollo-router/src/spec/schema.rs | 7 +++ apollo-router/src/spec/selection.rs | 11 ++++- 4 files changed, 65 insertions(+), 8 deletions(-) diff --git a/apollo-router/src/services/supergraph_service.rs b/apollo-router/src/services/supergraph_service.rs index d7bad236b5..4f13c364e8 100644 --- a/apollo-router/src/services/supergraph_service.rs +++ b/apollo-router/src/services/supergraph_service.rs @@ -2934,6 +2934,9 @@ mod tests { ).with_json( serde_json::json!{{"query":"query dog__animal__0{dog{__typename id name}}", "operationName": "dog__animal__0"}}, serde_json::json!{{"data":{"dog":{"__typename":"Dog","id":"4321","name":"Spot"}}}} + ).with_json( + serde_json::json!{{"query":"query dog__animal__0{dog{name id}}", "operationName": "dog__animal__0"}}, + serde_json::json!{{"data":{"dog":{"id":"4321","name":"Spot"}}}} ).build()), ].into_iter().collect()); @@ -3030,7 +3033,7 @@ mod tests { .build() .unwrap(); - let mut stream = service.oneshot(request).await.unwrap(); + let mut stream = service.clone().oneshot(request).await.unwrap(); let with_typename = stream.next_response().await.unwrap(); assert_eq!( @@ -3054,5 +3057,47 @@ mod tests { with_typename, no_typename ); + + let request = supergraph::Request::fake_builder() + .context(defer_context()) + .query( + "query dog { + dog { + ...on Dog { + name + ...on Animal { + id + } + } + } + }", + ) + .build() + .unwrap(); + + let mut stream = service.oneshot(request).await.unwrap(); + + let with_reversed_fragments = stream.next_response().await.unwrap(); + assert_eq!( + with_reversed_fragments + .data + .clone() + .unwrap() + .get("dog") + .unwrap() + .get("name") + .unwrap(), + no_typename + .data + .clone() + .unwrap() + .get("dog") + .unwrap() + .get("name") + .unwrap(), + "{:?}\n{:?}", + with_reversed_fragments, + no_typename + ); } } diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs index 8d44c55175..1a588c2a78 100644 --- a/apollo-router/src/spec/query.rs +++ b/apollo-router/src/spec/query.rs @@ -686,12 +686,10 @@ impl Query { || parameters.schema.is_subtype(type_condition, input_type) } else { known_type - .as_ref() - // We have no typename, we apply the selection set if the known_type implements the type_condition - .map(|k| is_subtype_or_same(parameters, type_condition, k)) + .as_ref() + // We have no typename, we apply the selection set if the known_type implements the type_condition + .map(|k| is_subtype_or_same(parameters, type_condition, k)) .unwrap_or_default() - // Or if the known_type implements the parent's type_condition because we're in an inline fragment. 
- || is_subtype_or_same(parameters, &parent_type.name(), type_condition) }; if is_apply { diff --git a/apollo-router/src/spec/schema.rs b/apollo-router/src/spec/schema.rs index b34b180f37..892883339b 100644 --- a/apollo-router/src/spec/schema.rs +++ b/apollo-router/src/spec/schema.rs @@ -160,6 +160,13 @@ impl Schema { .unwrap_or(false) } + pub(crate) fn is_interface(&self, abstract_type: &str) -> bool { + self.type_system + .definitions + .interfaces + .contains_key(abstract_type) + } + /// Return an iterator over subgraphs that yields the subgraph name and its URL. pub(crate) fn subgraphs(&self) -> impl Iterator { self.subgraphs.iter() diff --git a/apollo-router/src/spec/selection.rs b/apollo-router/src/spec/selection.rs index 34728d419b..55ae883342 100644 --- a/apollo-router/src/spec/selection.rs +++ b/apollo-router/src/spec/selection.rs @@ -152,17 +152,24 @@ impl Selection { let fragment_type = FieldType::new_named(type_condition.clone()); + let relevant_type = if schema.is_interface(type_condition.as_str()) { + current_type + } else { + &fragment_type + }; + + let known_type = relevant_type.inner_type_name().map(|s| s.to_string()); + let selection_set = inline_fragment .selection_set() .selection() .iter() .filter_map(|selection| { - Selection::from_hir(selection, &fragment_type, schema, count, defer_stats) + Selection::from_hir(selection, relevant_type, schema, count, defer_stats) .transpose() }) .collect::>()?; - let known_type = current_type.inner_type_name().map(|s| s.to_string()); Some(Self::InlineFragment { type_condition, selection_set, From ea1b418b63f7e197b56e45d39dc71cc206665d84 Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Fri, 1 Sep 2023 14:36:01 +0200 Subject: [PATCH 04/50] revert unused refacto --- apollo-router/src/spec/query.rs | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs index 1a588c2a78..6f6b5e02d3 100644 --- a/apollo-router/src/spec/query.rs +++ b/apollo-router/src/spec/query.rs @@ -688,8 +688,9 @@ impl Query { known_type .as_ref() // We have no typename, we apply the selection set if the known_type implements the type_condition - .map(|k| is_subtype_or_same(parameters, type_condition, k)) + .map(|k| parameters.schema.is_subtype(type_condition, k)) .unwrap_or_default() + || known_type.as_deref() == Some(type_condition.as_str()) }; if is_apply { @@ -1067,14 +1068,6 @@ impl Query { } } -fn is_subtype_or_same( - parameters: &FormatParameters<'_>, - parent: &String, - maybe_child: &String, -) -> bool { - parent == maybe_child || parameters.schema.is_subtype(parent, maybe_child) -} - /// Intermediate structure for arguments passed through the entire formatting struct FormatParameters<'a> { variables: &'a Object, From 146f7bc3656e02cae20d5176fcc83d26c64e7176 Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Fri, 1 Sep 2023 15:08:30 +0200 Subject: [PATCH 05/50] add one more test on best_effort with a more relevant output --- apollo-router/src/spec/query/tests.rs | 79 +++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) diff --git a/apollo-router/src/spec/query/tests.rs b/apollo-router/src/spec/query/tests.rs index 32c08139ab..61c6b35cef 100644 --- a/apollo-router/src/spec/query/tests.rs +++ b/apollo-router/src/spec/query/tests.rs @@ -539,6 +539,85 @@ fn reformat_response_data_best_effort() { .test(); } +#[test] +// just like the test above, except the query is one the planner would generate. 
+fn reformat_response_data_best_effort_relevant_query() { + FormatTest::builder() + .schema( + "type Query { + get: Thing + } + type Thing { + foo: String + stuff: Baz + array: [Element] + other: Bar + } + + type Baz { + bar: String + baz: String + } + + type Bar { + bar: String + } + + union Element = Baz | Bar + ", + ) + .query("{get{foo stuff{bar baz}array{...on Baz{bar baz}}other{bar}}}") + // the planner generates this: + // {get{foo stuff{bar baz}array{__typename ...on Baz{bar baz}}other{bar}}} + .response(json! { + { + "get": { + "foo": "1", + "stuff": {"baz": "2"}, + "array": [ + { + "__typename": "Baz", + "baz": "3" + }, + "4", + { + "__typename": "Baz", + "baz": "5" + }, + ], + "other": "6", + }, + "should_be_removed": { + "aaa": 2 + }, + } + }) + .expected(json! { + { + "get": { + "foo": "1", + "stuff": { + "bar": null, + "baz": "2", + }, + "array": [ + { + "bar":null, + "baz":"3" + }, + null, + { + "bar": null, + "baz":"5" + } + ], + "other": null, + }, + } + }) + .test(); +} + #[test] fn reformat_response_array_of_scalar_simple() { FormatTest::builder() From 1606786cc539380e258a4b5de27c1d0d5a70aa9c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 2 Sep 2023 16:21:21 +0000 Subject: [PATCH 06/50] fix(deps): update rust crate regex to 1.9.5 --- Cargo.lock | 10 +++++----- apollo-router/Cargo.toml | 2 +- xtask/Cargo.lock | 12 ++++++------ xtask/Cargo.toml | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1c570d7ec0..f1793dfbfc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4975,13 +4975,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" +checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.7", + "regex-automata 0.3.8", "regex-syntax 0.7.5", ] @@ -4996,9 +4996,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" +checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" dependencies = [ "aho-corasick", "memchr", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 8b25903d3a..5ab613a39a 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -163,7 +163,7 @@ prost-types = "0.11.9" proteus = "0.5.0" rand = "0.8.5" rhai = { version = "1.15.1", features = ["sync", "serde", "internals"] } -regex = "1.9.4" +regex = "1.9.5" reqwest = { version = "0.11.19", default-features = false, features = [ "rustls-tls", "rustls-native-certs", diff --git a/xtask/Cargo.lock b/xtask/Cargo.lock index d11f5724b0..bdf077abae 100644 --- a/xtask/Cargo.lock +++ b/xtask/Cargo.lock @@ -834,9 +834,9 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "5486aed0026218e61b8a01d5fbd5a0a134649abb71a0e53b7bc088529dced86e" [[package]] name = "memorable-wordlist" @@ -1060,9 +1060,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.4" +version = "1.9.5" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" +checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" dependencies = [ "aho-corasick", "memchr", @@ -1072,9 +1072,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" +checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" dependencies = [ "aho-corasick", "memchr", diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index 840f2a9283..b19d08afac 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -26,7 +26,7 @@ libc = "0.2" memorable-wordlist = "0.1.7" nu-ansi-term = "0.49" once_cell = "1" -regex = "1.9.4" +regex = "1.9.5" reqwest = { version = "0.11", default-features = false, features = [ "blocking", "rustls-tls", From 4acbc82ab733da7a256adb7091b192c39e070a09 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 2 Sep 2023 23:20:36 +0000 Subject: [PATCH 07/50] chore(deps): update all non-major packages >= 1.0 --- Cargo.lock | 12 ++++++------ apollo-router/Cargo.toml | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f1793dfbfc..fc9568d242 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3716,9 +3716,9 @@ checksum = "8c408dc227d302f1496c84d9dc68c00fec6f56f9228a18f3023f976f3ca7c945" [[package]] name = "memchr" -version = "2.6.2" +version = "2.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5486aed0026218e61b8a01d5fbd5a0a134649abb71a0e53b7bc088529dced86e" +checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" [[package]] name = "memoffset" @@ -6156,18 +6156,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.47" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" +checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.47" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" +checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 5ab613a39a..1334b2f0aa 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -189,7 +189,7 @@ serde_yaml = "0.8.26" static_assertions = "1.1.0" strum_macros = "0.25.2" sys-info = "0.9.1" -thiserror = "1.0.47" +thiserror = "1.0.48" tokio = { version = "1.32.0", features = ["full"] } tokio-stream = { version = "0.1.14", features = ["sync", "net"] } tokio-util = { version = "0.7.8", features = ["net", "codec", "time"] } @@ -231,7 +231,7 @@ tokio-rustls = "0.24.1" http-serde = "1.1.3" hmac = "0.12.1" parking_lot = "0.12.1" -memchr = "2.6.2" +memchr = "2.6.3" brotli = "3.3.4" zstd = "0.12.4" zstd-safe = "6.0.6" @@ -265,7 +265,7 @@ futures-test = "0.3.28" insta = { version = "1.31.0", features = ["json", "redactions", "yaml"] } introspector-gadget = "0.2.2" maplit = "1.0.2" -memchr = { version = "2.6.2", default-features = false } +memchr = { 
version = "2.6.3", default-features = false } mockall = "0.11.4" once_cell = "1.18.0" p256 = "0.12.0" From a97ac6b079e8ba3d85ac75ad452bbb46a6022c75 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Mon, 4 Sep 2023 16:19:46 +0200 Subject: [PATCH 08/50] small performance improvements for telemetry (#3656) * check span names from a HashSet instead of iterating through an array of strings * move the SpanMetricsExporter to a tracing Layer (#3669) SpanMetricsExporter is an OpenTelemetry Exporter, which means that if it is loaded, then the entire OpenTelemetry handling infrastructure is loaded, and especially the part that allocates data for the entire list of spans. Unfortunately, the SpanMetricsExporter was always loaded, even if we do not export the metrics outside of the router, which means that there's a constant overhead of telemetry even when it is not used. This moves the SpanMetricsExporter to a lightweight tracing-subscriber Layer which performs the same busy/idle accounting as OpenTelemetryLayer, then generates the same events as before when the span closes --- .changesets/fix_geal_telemetry_perf.md | 5 + .../metrics/span_metrics_exporter.rs | 191 ++++++++++++------ apollo-router/src/plugins/telemetry/mod.rs | 7 +- apollo-router/src/plugins/telemetry/reload.rs | 12 +- 4 files changed, 142 insertions(+), 73 deletions(-) create mode 100644 .changesets/fix_geal_telemetry_perf.md diff --git a/.changesets/fix_geal_telemetry_perf.md b/.changesets/fix_geal_telemetry_perf.md new file mode 100644 index 0000000000..f20ceee31a --- /dev/null +++ b/.changesets/fix_geal_telemetry_perf.md @@ -0,0 +1,5 @@ +### small performance improvements for telemetry ([PR #3656](https://github.com/apollographql/router/pull/3656)) + +The SpanMetricsExporter, used to report span timings hade a few inefficiencies in the way it recognized spans, and it brought a constant overhead to the router usage, even when telemetry was not configured. 
It has now been isolated and optimized + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3656 \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs b/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs index 1c2aa6642a..5e6778ab74 100644 --- a/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs +++ b/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs @@ -1,11 +1,13 @@ -use async_trait::async_trait; -use futures::future::BoxFuture; -use futures::FutureExt; -use opentelemetry::sdk::export::trace::ExportResult; -use opentelemetry::sdk::export::trace::SpanData; -use opentelemetry::sdk::export::trace::SpanExporter; -use opentelemetry::Key; -use opentelemetry::Value; +use std::collections::HashSet; +use std::time::Instant; + +use tracing_core::field::Visit; +use tracing_core::span; +use tracing_core::Field; +use tracing_core::Subscriber; +use tracing_subscriber::layer::Context; +use tracing_subscriber::registry::LookupSpan; +use tracing_subscriber::Layer; use crate::axum_factory::utils::REQUEST_SPAN_NAME; use crate::plugins::telemetry::EXECUTION_SPAN_NAME; @@ -13,69 +15,126 @@ use crate::plugins::telemetry::SUBGRAPH_SPAN_NAME; use crate::plugins::telemetry::SUPERGRAPH_SPAN_NAME; use crate::services::QUERY_PLANNING_SPAN_NAME; -const SPAN_NAMES: &[&str] = &[ - REQUEST_SPAN_NAME, - SUPERGRAPH_SPAN_NAME, - SUBGRAPH_SPAN_NAME, - QUERY_PLANNING_SPAN_NAME, - EXECUTION_SPAN_NAME, -]; - -const BUSY_NS_ATTRIBUTE_NAME: Key = Key::from_static_str("busy_ns"); -const IDLE_NS_ATTRIBUTE_NAME: Key = Key::from_static_str("idle_ns"); -const SUBGRAPH_ATTRIBUTE_NAME: Key = Key::from_static_str("apollo.subgraph.name"); - -#[derive(Debug, Default)] -pub(crate) struct Exporter {} -#[async_trait] -impl SpanExporter for Exporter { - /// Export spans metrics to real metrics - fn export(&mut self, batch: Vec) -> BoxFuture<'static, ExportResult> { - for span in batch - .into_iter() - .filter(|s| SPAN_NAMES.contains(&s.name.as_ref())) - { - let busy = span - .attributes - .get(&BUSY_NS_ATTRIBUTE_NAME) - .and_then(|attr| match attr { - Value::I64(v) => Some(*v), - _ => None, - }) - .unwrap_or_default(); - let idle = span - .attributes - .get(&IDLE_NS_ATTRIBUTE_NAME) - .and_then(|attr| match attr { - Value::I64(v) => Some(*v), - _ => None, - }) - .unwrap_or_default(); - let duration = span - .end_time - .duration_since(span.start_time) - .unwrap_or_default() - .as_secs_f64(); +const SUBGRAPH_ATTRIBUTE_NAME: &str = "apollo.subgraph.name"; + +#[derive(Debug)] +pub(crate) struct SpanMetricsLayer { + span_names: HashSet<&'static str>, +} + +impl Default for SpanMetricsLayer { + fn default() -> Self { + Self { + span_names: [ + REQUEST_SPAN_NAME, + SUPERGRAPH_SPAN_NAME, + SUBGRAPH_SPAN_NAME, + QUERY_PLANNING_SPAN_NAME, + EXECUTION_SPAN_NAME, + ] + .into(), + } + } +} + +impl Layer for SpanMetricsLayer +where + S: Subscriber + for<'span> LookupSpan<'span>, +{ + fn on_new_span(&self, attrs: &span::Attributes<'_>, id: &span::Id, ctx: Context<'_, S>) { + let span = ctx.span(id).expect("Span not found, this is a bug"); + let mut extensions = span.extensions_mut(); + + let name = attrs.metadata().name(); + if self.span_names.contains(name) && extensions.get_mut::().is_none() { + let mut timings = Timings::new(); + if name == SUBGRAPH_SPAN_NAME { + attrs.values().record(&mut ValueVisitor { + timings: &mut timings, + }); + } + extensions.insert(Timings::new()); + } + } + + fn on_record(&self, 
_span: &span::Id, _values: &span::Record<'_>, _ctx: Context<'_, S>) {} + + fn on_close(&self, id: span::Id, ctx: Context<'_, S>) { + let span = ctx.span(&id).expect("Span not found, this is a bug"); + let mut extensions = span.extensions_mut(); + + if let Some(timings) = extensions.get_mut::() { + let duration = timings.start.elapsed().as_secs_f64(); // Convert it in seconds - let idle: f64 = idle as f64 / 1_000_000_000_f64; - let busy: f64 = busy as f64 / 1_000_000_000_f64; - if span.name == SUBGRAPH_SPAN_NAME { - let subgraph_name = span - .attributes - .get(&SUBGRAPH_ATTRIBUTE_NAME) - .map(|name| name.as_str()) - .unwrap_or_default(); - ::tracing::info!(histogram.apollo_router_span = duration, kind = %"duration", span = %span.name, subgraph = %subgraph_name); - ::tracing::info!(histogram.apollo_router_span = idle, kind = %"idle", span = %span.name, subgraph = %subgraph_name); - ::tracing::info!(histogram.apollo_router_span = busy, kind = %"busy", span = %span.name, subgraph = %subgraph_name); + let idle: f64 = timings.idle as f64 / 1_000_000_000_f64; + let busy: f64 = timings.busy as f64 / 1_000_000_000_f64; + let name = span.metadata().name(); + if let Some(subgraph_name) = timings.subgraph.take() { + ::tracing::info!(histogram.apollo_router_span = duration, kind = %"duration", span = %name, subgraph = %subgraph_name); + ::tracing::info!(histogram.apollo_router_span = idle, kind = %"idle", span = %name, subgraph = %subgraph_name); + ::tracing::info!(histogram.apollo_router_span = busy, kind = %"busy", span = %name, subgraph = %subgraph_name); } else { - ::tracing::info!(histogram.apollo_router_span = duration, kind = %"duration", span = %span.name); - ::tracing::info!(histogram.apollo_router_span = idle, kind = %"idle", span = %span.name); - ::tracing::info!(histogram.apollo_router_span = busy, kind = %"busy", span = %span.name); + ::tracing::info!(histogram.apollo_router_span = duration, kind = %"duration", span = %name); + ::tracing::info!(histogram.apollo_router_span = idle, kind = %"idle", span = %name); + ::tracing::info!(histogram.apollo_router_span = busy, kind = %"busy", span = %name); } } + } - async { Ok(()) }.boxed() + fn on_enter(&self, id: &span::Id, ctx: Context<'_, S>) { + let span = ctx.span(id).expect("Span not found, this is a bug"); + let mut extensions = span.extensions_mut(); + + if let Some(timings) = extensions.get_mut::() { + let now = Instant::now(); + timings.idle += (now - timings.last).as_nanos() as i64; + timings.last = now; + } + } + + fn on_exit(&self, id: &span::Id, ctx: Context<'_, S>) { + let span = ctx.span(id).expect("Span not found, this is a bug"); + let mut extensions = span.extensions_mut(); + + if let Some(timings) = extensions.get_mut::() { + let now = Instant::now(); + timings.busy += (now - timings.last).as_nanos() as i64; + timings.last = now; + } + } +} + +struct Timings { + idle: i64, + busy: i64, + last: Instant, + start: Instant, + subgraph: Option, +} + +impl Timings { + fn new() -> Self { + Self { + idle: 0, + busy: 0, + last: Instant::now(), + start: Instant::now(), + subgraph: None, + } + } +} + +struct ValueVisitor<'a> { + timings: &'a mut Timings, +} + +impl<'a> Visit for ValueVisitor<'a> { + fn record_debug(&mut self, _field: &Field, _value: &dyn std::fmt::Debug) {} + + fn record_str(&mut self, field: &Field, value: &str) { + if field.name() == SUBGRAPH_ATTRIBUTE_NAME { + self.timings.subgraph = Some(value.to_string()); + } } } diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs 
index b61d5fbf21..f6a8ba5202 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -66,6 +66,7 @@ use self::metrics::AttributesForwardConf; use self::metrics::MetricsAttributesConf; use self::reload::reload_fmt; use self::reload::reload_metrics; +use self::reload::LayeredRegistry; use self::reload::NullFieldFormatter; use self::reload::OPENTELEMETRY_TRACER_HANDLE; use self::tracing::apollo_telemetry::APOLLO_PRIVATE_DURATION_NS; @@ -622,8 +623,6 @@ impl Telemetry { builder = setup_tracing(builder, &tracing_config.datadog, trace_config)?; builder = setup_tracing(builder, &tracing_config.otlp, trace_config)?; builder = setup_tracing(builder, &config.apollo, trace_config)?; - // For metrics - builder = builder.with_simple_exporter(metrics::span_metrics_exporter::Exporter::default()); let tracer_provider = builder.build(); Ok(tracer_provider) @@ -672,10 +671,10 @@ impl Telemetry { dyn Layer< ::tracing_subscriber::layer::Layered< OpenTelemetryLayer< - ::tracing_subscriber::Registry, + LayeredRegistry, ReloadTracer<::opentelemetry::sdk::trace::Tracer>, >, - ::tracing_subscriber::Registry, + LayeredRegistry, >, > + Send + Sync, diff --git a/apollo-router/src/plugins/telemetry/reload.rs b/apollo-router/src/plugins/telemetry/reload.rs index 1c66ccf4ef..50ce48747a 100644 --- a/apollo-router/src/plugins/telemetry/reload.rs +++ b/apollo-router/src/plugins/telemetry/reload.rs @@ -15,6 +15,7 @@ use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; use tracing_subscriber::Registry; +use super::metrics::span_metrics_exporter::SpanMetricsLayer; use crate::plugins::telemetry::formatters::filter_metric_events; use crate::plugins::telemetry::formatters::text::TextFormatter; use crate::plugins::telemetry::formatters::FilteringFormatter; @@ -22,7 +23,10 @@ use crate::plugins::telemetry::metrics; use crate::plugins::telemetry::metrics::layer::MetricsLayer; use crate::plugins::telemetry::tracing::reload::ReloadTracer; -type LayeredTracer = Layered>, Registry>; +pub(crate) type LayeredRegistry = Layered; + +type LayeredTracer = + Layered>, LayeredRegistry>; // These handles allow hot tracing of layers. They have complex type definitions because tracing has // generic types in the layer definition. 
@@ -96,6 +100,7 @@ pub(crate) fn init_telemetry(log_level: &str) -> Result<()> { // Env filter is separate because of https://github.com/tokio-rs/tracing/issues/1629 // the tracing registry is only created once tracing_subscriber::registry() + .with(SpanMetricsLayer::default()) .with(opentelemetry_layer) .with(fmt_layer) .with(metrics_layer) @@ -128,8 +133,9 @@ pub(super) fn reload_metrics(layer: MetricsLayer) { #[allow(clippy::type_complexity)] pub(super) fn reload_fmt( layer: Box< - dyn Layer>, Registry>> - + Send + dyn Layer< + Layered>, LayeredRegistry>, + > + Send + Sync, >, ) { From 307e0ebed237c892d835ee4310b441fb956272c3 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Mon, 4 Sep 2023 16:29:51 +0200 Subject: [PATCH 09/50] remove clones from the header plugin (#3721) Fix #3068 the operations were cloned for every subgraph query, this is a bit wasteful so we create them only once, and store them under an Arc --- ..._geal_remove_clones_from_headers_plugin.md | 5 + apollo-router/src/plugins/headers.rs | 132 ++++++++++-------- 2 files changed, 82 insertions(+), 55 deletions(-) create mode 100644 .changesets/fix_geal_remove_clones_from_headers_plugin.md diff --git a/.changesets/fix_geal_remove_clones_from_headers_plugin.md b/.changesets/fix_geal_remove_clones_from_headers_plugin.md new file mode 100644 index 0000000000..36881a1de4 --- /dev/null +++ b/.changesets/fix_geal_remove_clones_from_headers_plugin.md @@ -0,0 +1,5 @@ +### remove clones from the header plugin ([Issue #3068](https://github.com/apollographql/router/issues/3068)) + +The list of header operations was cloned for every subgraph query, and this was increasing latency. We made sure the overhead is minimal by removing those allocations + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3721 \ No newline at end of file diff --git a/apollo-router/src/plugins/headers.rs b/apollo-router/src/plugins/headers.rs index 7a6e56d4ac..074cafd5e8 100644 --- a/apollo-router/src/plugins/headers.rs +++ b/apollo-router/src/plugins/headers.rs @@ -1,4 +1,5 @@ use std::collections::HashMap; +use std::sync::Arc; use std::task::Context; use std::task::Poll; @@ -187,7 +188,8 @@ struct Config { } struct Headers { - config: Config, + all_operations: Arc>, + subgraph_operations: HashMap>>, } #[async_trait::async_trait] @@ -195,36 +197,48 @@ impl Plugin for Headers { type Config = Config; async fn new(init: PluginInit) -> Result { - Ok(Headers { - config: init.config, - }) - } - fn subgraph_service(&self, name: &str, service: subgraph::BoxService) -> subgraph::BoxService { - let mut operations: Vec = self + let operations: Vec = init .config .all .as_ref() .map(|a| a.request.clone()) .unwrap_or_default(); - if let Some(mut subgraph_operations) = - self.config.subgraphs.get(name).map(|s| s.request.clone()) - { - operations.append(&mut subgraph_operations); - } + let subgraph_operations = init + .config + .subgraphs + .iter() + .map(|(subgraph_name, op)| { + let mut operations = operations.clone(); + operations.append(&mut op.request.clone()); + (subgraph_name.clone(), Arc::new(operations)) + }) + .collect(); + Ok(Headers { + all_operations: Arc::new(operations), + subgraph_operations, + }) + } + + fn subgraph_service(&self, name: &str, service: subgraph::BoxService) -> subgraph::BoxService { ServiceBuilder::new() - .layer(HeadersLayer::new(operations)) + .layer(HeadersLayer::new( + self.subgraph_operations + .get(name) + .cloned() + .unwrap_or_else(|| self.all_operations.clone()), + )) .service(service) .boxed() 
} } struct HeadersLayer { - operations: Vec, + operations: Arc>, } impl HeadersLayer { - fn new(operations: Vec) -> Self { + fn new(operations: Arc>) -> Self { Self { operations } } } @@ -241,7 +255,7 @@ impl Layer for HeadersLayer { } struct HeadersService { inner: S, - operations: Vec, + operations: Arc>, } lazy_static! { @@ -279,7 +293,7 @@ where } fn call(&mut self, mut req: SubgraphRequest) -> Self::Future { - for operation in &self.operations { + for operation in &*self.operations { match operation { Operation::Insert(insert_config) => match insert_config { Insert::Static(static_insert) => { @@ -523,12 +537,13 @@ mod test { }) .returning(example_response); - let mut service = - HeadersLayer::new(vec![Operation::Insert(Insert::Static(InsertStatic { + let mut service = HeadersLayer::new(Arc::new(vec![Operation::Insert(Insert::Static( + InsertStatic { name: "c".try_into()?, value: "d".try_into()?, - }))]) - .layer(mock); + }, + ))])) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -549,12 +564,12 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(vec![Operation::Insert(Insert::FromContext( - InsertFromContext { + let mut service = HeadersLayer::new(Arc::new(vec![Operation::Insert( + Insert::FromContext(InsertFromContext { name: "header_from_context".try_into()?, from_context: "my_key".to_string(), - }, - ))]) + }), + )])) .layer(mock); service.ready().await?.call(example_request()).await?; @@ -576,13 +591,14 @@ mod test { }) .returning(example_response); - let mut service = - HeadersLayer::new(vec![Operation::Insert(Insert::FromBody(InsertFromBody { + let mut service = HeadersLayer::new(Arc::new(vec![Operation::Insert(Insert::FromBody( + InsertFromBody { name: "header_from_request".try_into()?, path: JSONQuery::parse(".operationName")?, default: None, - }))]) - .layer(mock); + }, + ))])) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -596,8 +612,10 @@ mod test { .withf(|request| request.assert_headers(vec![("ac", "vac"), ("ab", "vab")])) .returning(example_response); - let mut service = - HeadersLayer::new(vec![Operation::Remove(Remove::Named("aa".try_into()?))]).layer(mock); + let mut service = HeadersLayer::new(Arc::new(vec![Operation::Remove(Remove::Named( + "aa".try_into()?, + ))])) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -611,9 +629,9 @@ mod test { .withf(|request| request.assert_headers(vec![("ac", "vac")])) .returning(example_response); - let mut service = HeadersLayer::new(vec![Operation::Remove(Remove::Matching( + let mut service = HeadersLayer::new(Arc::new(vec![Operation::Remove(Remove::Matching( Regex::from_str("a[ab]")?, - ))]) + ))])) .layer(mock); service.ready().await?.call(example_request()).await?; @@ -636,10 +654,11 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(vec![Operation::Propagate(Propagate::Matching { - matching: Regex::from_str("d[ab]")?, - })]) - .layer(mock); + let mut service = + HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Matching { + matching: Regex::from_str("d[ab]")?, + })])) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -660,12 +679,13 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(vec![Operation::Propagate(Propagate::Named { - named: "da".try_into()?, - rename: None, - default: None, - })]) - .layer(mock); + let mut service = + 
HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Named { + named: "da".try_into()?, + rename: None, + default: None, + })])) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -686,12 +706,13 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(vec![Operation::Propagate(Propagate::Named { - named: "da".try_into()?, - rename: Some("ea".try_into()?), - default: None, - })]) - .layer(mock); + let mut service = + HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Named { + named: "da".try_into()?, + rename: Some("ea".try_into()?), + default: None, + })])) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -712,12 +733,13 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(vec![Operation::Propagate(Propagate::Named { - named: "ea".try_into()?, - rename: None, - default: Some("defaulted".try_into()?), - })]) - .layer(mock); + let mut service = + HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Named { + named: "ea".try_into()?, + rename: None, + default: Some("defaulted".try_into()?), + })])) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) From b8efa32bc6ef7e883fdae578e9cb20590374115b Mon Sep 17 00:00:00 2001 From: Avery Harnish Date: Mon, 4 Sep 2023 09:37:04 -0500 Subject: [PATCH 10/50] chore: removes unused introspector-gadget crate (#3709) This PR removes the `introspector-gadget` dependency from the `Cargo.toml` as I don't believe it's used by this crate anymore. If this passes I think I'd like to deprecate that crate and move it fully back into the Rover repository so it's easier to make changes to its implementation. --- Cargo.lock | 43 ++-------------------------------------- apollo-router/Cargo.toml | 1 - 2 files changed, 2 insertions(+), 42 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fc9568d242..aa06341378 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -230,16 +230,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "apollo-encoder" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a67cf5282faa8a66c848e1f4aef139c3cfe307025029983d05b80f8360f41e8" -dependencies = [ - "apollo-parser 0.5.3", - "thiserror", -] - [[package]] name = "apollo-encoder" version = "0.7.0" @@ -278,7 +268,7 @@ dependencies = [ "access-json", "anyhow", "apollo-compiler 0.11.1", - "apollo-encoder 0.7.0", + "apollo-encoder", "apollo-parser 0.6.1", "arc-swap", "askama", @@ -322,7 +312,6 @@ dependencies = [ "hyper-rustls", "indexmap 2.0.0", "insta", - "introspector-gadget", "itertools 0.11.0", "jsonpath-rust", "jsonpath_lib", @@ -453,7 +442,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13ed94bb9de9f24da12ca2122b8eeaa7484d17b090dc84daaaba6b6ac2bee89b" dependencies = [ - "apollo-encoder 0.7.0", + "apollo-encoder", "apollo-parser 0.6.1", "arbitrary", "once_cell", @@ -1005,17 +994,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "backoff" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" -dependencies = [ - "getrandom 0.2.10", - "instant", - "rand 0.8.5", -] - [[package]] name = "backtrace" version = "0.3.68" @@ -3324,23 +3302,6 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" 
-[[package]] -name = "introspector-gadget" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f07e629cb6382fac6e1fb1560123f81438556e6fe4219fec939ad5ff4345d9fa" -dependencies = [ - "apollo-encoder 0.5.1", - "backoff", - "graphql_client", - "hyper", - "reqwest", - "serde", - "serde_json", - "thiserror", - "tracing", -] - [[package]] name = "inventory" version = "0.2.3" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 1334b2f0aa..8c70c8c4d4 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -263,7 +263,6 @@ ecdsa = { version = "0.15.1", features = ["signing", "pem", "pkcs8"] } fred = { version = "6.3.1", features = ["enable-rustls", "no-client-setname"] } futures-test = "0.3.28" insta = { version = "1.31.0", features = ["json", "redactions", "yaml"] } -introspector-gadget = "0.2.2" maplit = "1.0.2" memchr = { version = "2.6.3", default-features = false } mockall = "0.11.4" From 27df9da695b016ad2bb28272469aa14ce93b0dc1 Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Mon, 4 Sep 2023 17:25:38 +0200 Subject: [PATCH 11/50] Fix: Allow anonymous operation_name to be set in the context. This change allows the contexts OPERATION_NAME to be set to None. --- apollo-router/src/context/mod.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/apollo-router/src/context/mod.rs b/apollo-router/src/context/mod.rs index 07042e08a8..8313768848 100644 --- a/apollo-router/src/context/mod.rs +++ b/apollo-router/src/context/mod.rs @@ -74,7 +74,7 @@ impl Context { // This method should be removed once we have a proper way to get the operation name. self.entries .get(OPERATION_NAME) - .map(|v| v.value().as_str().unwrap().to_string()) + .and_then(|v| v.value().as_str().map(|s| s.to_string())) } /// Returns true if the context contains a value for the specified key. 
@@ -307,7 +307,7 @@ impl Default for BusyTimer { #[cfg(test)] mod test { - use crate::Context; + use crate::{context::OPERATION_NAME, Context}; #[test] fn test_context_insert() { @@ -370,4 +370,11 @@ mod test { assert_eq!(c.get("one").unwrap(), Some(2)); assert_eq!(c.get("two").unwrap(), Some(3)); } + + #[test] + fn operation_name_defaults_to_an_empty_string() { + let c = Context::new(); + c.insert(OPERATION_NAME, Option::::None).unwrap(); + assert!(c.operation_name().is_none()) + } } From 7a565fde87e9b598a15369f085102377017d38c5 Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Mon, 4 Sep 2023 17:39:07 +0200 Subject: [PATCH 12/50] lint --- apollo-router/src/context/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/apollo-router/src/context/mod.rs b/apollo-router/src/context/mod.rs index 8313768848..8b3954aa0d 100644 --- a/apollo-router/src/context/mod.rs +++ b/apollo-router/src/context/mod.rs @@ -307,7 +307,8 @@ impl Default for BusyTimer { #[cfg(test)] mod test { - use crate::{context::OPERATION_NAME, Context}; + use crate::context::OPERATION_NAME; + use crate::Context; #[test] fn test_context_insert() { From bbcfe24b11616337b1d2b5bbb5f5013e01f5c699 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Mon, 4 Sep 2023 15:56:56 +0000 Subject: [PATCH 13/50] changeset --- .changesets/fix_dragonfly_ship_win_folder.md | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 .changesets/fix_dragonfly_ship_win_folder.md diff --git a/.changesets/fix_dragonfly_ship_win_folder.md b/.changesets/fix_dragonfly_ship_win_folder.md new file mode 100644 index 0000000000..892df67579 --- /dev/null +++ b/.changesets/fix_dragonfly_ship_win_folder.md @@ -0,0 +1,9 @@ +### Subscriptions: Correct v1.28.x regression allowing panic via un-named subscription operation + +Correct a regression that was introduced in Router v1.28.0 which made a Router **panic** possible when the following _three_ conditions are _all_ met: + +1. When sending an un-named (i.e., "anonymous") `subscription` operation (e.g., `subscription { ... }`); **and**; +2. The Router has a `subscription` type defined in the Supergraph schema; **and** +3. Have subscriptions enabled (they are disabled by default) in the Router's YAML configuration, either by setting `enabled: true` _or_ by setting a `mode` within the `subscriptions` object (as seen in [the subscriptions documentation](https://www.apollographql.com/docs/router/executing-operations/subscription-support/#router-setup). + +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3738 From 3ac446999eeb1c1ad181fcbc061f56fbfa414510 Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Mon, 4 Sep 2023 17:59:36 +0200 Subject: [PATCH 14/50] remove the function alltogether, along with its call --- apollo-router/src/context/mod.rs | 14 -------------- apollo-router/src/services/subgraph_service.rs | 8 +++++++- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/apollo-router/src/context/mod.rs b/apollo-router/src/context/mod.rs index 8b3954aa0d..bb45b46963 100644 --- a/apollo-router/src/context/mod.rs +++ b/apollo-router/src/context/mod.rs @@ -70,13 +70,6 @@ impl Context { } impl Context { - pub(crate) fn operation_name(&self) -> Option { - // This method should be removed once we have a proper way to get the operation name. - self.entries - .get(OPERATION_NAME) - .and_then(|v| v.value().as_str().map(|s| s.to_string())) - } - /// Returns true if the context contains a value for the specified key. 
pub fn contains_key(&self, key: K) -> bool where @@ -371,11 +364,4 @@ mod test { assert_eq!(c.get("one").unwrap(), Some(2)); assert_eq!(c.get("two").unwrap(), Some(3)); } - - #[test] - fn operation_name_defaults_to_an_empty_string() { - let c = Context::new(); - c.insert(OPERATION_NAME, Option::::None).unwrap(); - assert!(c.operation_name().is_none()) - } } diff --git a/apollo-router/src/services/subgraph_service.rs b/apollo-router/src/services/subgraph_service.rs index e137177a7c..c99e482786 100644 --- a/apollo-router/src/services/subgraph_service.rs +++ b/apollo-router/src/services/subgraph_service.rs @@ -431,6 +431,13 @@ async fn call_websocket( subgraph_cfg: &WebSocketConfiguration, subscription_hash: String, ) -> Result { + let operation_name = request + .subgraph_request + .body() + .operation_name + .clone() + .unwrap_or_default(); + let SubgraphRequest { subgraph_request, subscription_stream, @@ -445,7 +452,6 @@ async fn call_websocket( let (handle, created) = notify .create_or_subscribe(subscription_hash.clone(), false) .await?; - let operation_name = context.operation_name().unwrap_or_default(); tracing::info!( monotonic_counter.apollo.router.operations.subscriptions = 1u64, subscriptions.mode = %"passthrough", From 8904011668cf965101e07c3f41235b7f5a84e2a3 Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Mon, 4 Sep 2023 18:01:45 +0200 Subject: [PATCH 15/50] remove unused import --- apollo-router/src/context/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/apollo-router/src/context/mod.rs b/apollo-router/src/context/mod.rs index bb45b46963..83bdb2bc80 100644 --- a/apollo-router/src/context/mod.rs +++ b/apollo-router/src/context/mod.rs @@ -300,7 +300,6 @@ impl Default for BusyTimer { #[cfg(test)] mod test { - use crate::context::OPERATION_NAME; use crate::Context; #[test] From a9bbe0ff94f9ef9b7c97b7ea70eb979dbbc06665 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 12:41:07 +0300 Subject: [PATCH 16/50] chore(deps): update actions/checkout action to v4 (#3733) --- .github/workflows/update_uplink_schema.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/update_uplink_schema.yml b/.github/workflows/update_uplink_schema.yml index 20a7560c0b..706c8f0457 100644 --- a/.github/workflows/update_uplink_schema.yml +++ b/.github/workflows/update_uplink_schema.yml @@ -9,7 +9,7 @@ jobs: Update-Uplink-Schema: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4 - name: Install Rover run: | curl -sSL https://rover.apollo.dev/nix/v0.14.1 | sh From f8cfff7519474e79b3350c0a5543abbc883fa027 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 5 Sep 2023 11:49:20 +0200 Subject: [PATCH 17/50] use snapshots in the tests and different ids to recognize responses --- ..._service__tests__no_typename_on_interface-2.snap | 13 +++++++++++++ ..._service__tests__no_typename_on_interface-3.snap | 12 ++++++++++++ ...ph_service__tests__no_typename_on_interface.snap | 12 ++++++++++++ apollo-router/src/services/supergraph_service.rs | 7 +++++-- 4 files changed, 42 insertions(+), 2 deletions(-) create mode 100644 apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-2.snap create mode 100644 apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-3.snap create mode 100644 
apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface.snap diff --git a/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-2.snap b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-2.snap new file mode 100644 index 0000000000..00772267f1 --- /dev/null +++ b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-2.snap @@ -0,0 +1,13 @@ +--- +source: apollo-router/src/services/supergraph_service.rs +expression: with_typename +--- +{ + "data": { + "dog": { + "id": "8765", + "__typename": "Dog", + "name": "Spot" + } + } +} diff --git a/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-3.snap b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-3.snap new file mode 100644 index 0000000000..f385b2bdd9 --- /dev/null +++ b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-3.snap @@ -0,0 +1,12 @@ +--- +source: apollo-router/src/services/supergraph_service.rs +expression: with_reversed_fragments +--- +{ + "data": { + "dog": { + "name": "Spot", + "id": "0000" + } + } +} diff --git a/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface.snap b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface.snap new file mode 100644 index 0000000000..2a443b1b15 --- /dev/null +++ b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface.snap @@ -0,0 +1,12 @@ +--- +source: apollo-router/src/services/supergraph_service.rs +expression: no_typename +--- +{ + "data": { + "dog": { + "id": "4321", + "name": "Spot" + } + } +} diff --git a/apollo-router/src/services/supergraph_service.rs b/apollo-router/src/services/supergraph_service.rs index 4f13c364e8..9f14a693a0 100644 --- a/apollo-router/src/services/supergraph_service.rs +++ b/apollo-router/src/services/supergraph_service.rs @@ -2933,10 +2933,10 @@ mod tests { serde_json::json!{{"data":{"dog":{"id":"4321","name":"Spot"}}}} ).with_json( serde_json::json!{{"query":"query dog__animal__0{dog{__typename id name}}", "operationName": "dog__animal__0"}}, - serde_json::json!{{"data":{"dog":{"__typename":"Dog","id":"4321","name":"Spot"}}}} + serde_json::json!{{"data":{"dog":{"__typename":"Dog","id":"8765","name":"Spot"}}}} ).with_json( serde_json::json!{{"query":"query dog__animal__0{dog{name id}}", "operationName": "dog__animal__0"}}, - serde_json::json!{{"data":{"dog":{"id":"4321","name":"Spot"}}}} + serde_json::json!{{"data":{"dog":{"id":"0000","name":"Spot"}}}} ).build()), ].into_iter().collect()); @@ -3014,6 +3014,7 @@ mod tests { let mut stream = service.clone().oneshot(request).await.unwrap(); let no_typename = stream.next_response().await.unwrap(); + insta::assert_json_snapshot!(no_typename); let request = supergraph::Request::fake_builder() .context(defer_context()) @@ -3057,6 +3058,7 @@ mod tests { with_typename, no_typename ); + insta::assert_json_snapshot!(with_typename); let request = supergraph::Request::fake_builder() .context(defer_context()) @@ -3099,5 +3101,6 @@ mod tests { with_reversed_fragments, no_typename ); + 
insta::assert_json_snapshot!(with_reversed_fragments); } } From 14fb8abaa93bf0115eb88a4a6613fe6c599d61f2 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 5 Sep 2023 11:51:26 +0200 Subject: [PATCH 18/50] keep using the current type as known type at the fragment application --- apollo-router/src/spec/query/tests.rs | 10 ++-------- apollo-router/src/spec/selection.rs | 18 ++++++++++++++++-- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/apollo-router/src/spec/query/tests.rs b/apollo-router/src/spec/query/tests.rs index 61c6b35cef..e3493baf6d 100644 --- a/apollo-router/src/spec/query/tests.rs +++ b/apollo-router/src/spec/query/tests.rs @@ -522,15 +522,9 @@ fn reformat_response_data_best_effort() { "baz": "2", }, "array": [ - { - "bar":null, - "baz":"3" - }, + {}, null, - { - "bar":"5", - "baz":null - } + {} ], "other": null, }, diff --git a/apollo-router/src/spec/selection.rs b/apollo-router/src/spec/selection.rs index 55ae883342..5252a7f5d9 100644 --- a/apollo-router/src/spec/selection.rs +++ b/apollo-router/src/spec/selection.rs @@ -151,15 +151,29 @@ impl Selection { .ok_or_else(|| SpecError::InvalidType(current_type.to_string()))?; let fragment_type = FieldType::new_named(type_condition.clone()); + let known_type = current_type.inner_type_name().map(|s| s.to_string()); + // this is the type we pass when extracting the fragment's selections + // If the type condition is a union or interface and the current type implements it, then we want + // to keep the current type when extracting the fragment's selections, as it is more precise + // than the interface. + // If it is not, then we use the type condition let relevant_type = if schema.is_interface(type_condition.as_str()) { + // Query validation should have already verified that current type implements that interface + debug_assert!( + schema.is_subtype( + type_condition.as_str(), + current_type.inner_type_name().unwrap_or("") + ) || + // if the current type and the type condition are both the same interface, it is still valid + type_condition.as_str() + == current_type.inner_type_name().unwrap_or("") + ); current_type } else { &fragment_type }; - let known_type = relevant_type.inner_type_name().map(|s| s.to_string()); - let selection_set = inline_fragment .selection_set() .selection() From 818199f5550f3bb5015154fa60458a9414e2f2c9 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 5 Sep 2023 12:29:00 +0200 Subject: [PATCH 19/50] metadata cleanup (#3746) * remove unused patch entries in Cargo.toml (referring to a 1 year old commit in opentelemetry) * remove exemptions for the chrono security advisories (they are fixed now) --- .changesets/maint_geal_metadata_cleanup.md | 6 ++++++ Cargo.toml | 7 ------- deny.toml | 6 +----- 3 files changed, 7 insertions(+), 12 deletions(-) create mode 100644 .changesets/maint_geal_metadata_cleanup.md diff --git a/.changesets/maint_geal_metadata_cleanup.md b/.changesets/maint_geal_metadata_cleanup.md new file mode 100644 index 0000000000..ee9628f870 --- /dev/null +++ b/.changesets/maint_geal_metadata_cleanup.md @@ -0,0 +1,6 @@ +### Metadata cleanup ([PR #3746](https://github.com/apollographql/router/pull/3746)) + +* remove unused patch entries in Cargo.toml +* remove exemptions for the chrono security advisories (they are fixed now) + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3746 \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index 3df068499c..9c44480765 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,10 +41,3 @@ 
incremental = false inherits = "release" debug = 1 -[patch.crates-io] -# TODO: to delete -# opentelemetry = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", rev = "e5ef3552efab2bdbf2f838023c37461cd799ab2c"} -# opentelemetry-http = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", rev = "e5ef3552efab2bdbf2f838023c37461cd799ab2c"} -# opentelemetry-jaeger = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", rev = "e5ef3552efab2bdbf2f838023c37461cd799ab2c"} -# opentelemetry-zipkin = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", rev = "e5ef3552efab2bdbf2f838023c37461cd799ab2c"} -# opentelemetry-datadog = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", rev = "e5ef3552efab2bdbf2f838023c37461cd799ab2c"} diff --git a/deny.toml b/deny.toml index 138c3a6bc2..084bb9a883 100644 --- a/deny.toml +++ b/deny.toml @@ -26,13 +26,9 @@ git-fetch-with-cli = true # A list of advisory IDs to ignore. Note that ignored advisories will still # output a note when they are encountered. -# while https://github.com/chronotope/chrono/issues/499 is open. -# We need to keep track of this issue, and make sure `tracing-subscriber` is updated -# We will then be able to remove this -# # RUSTSEC-2023-0052 and RUSTSEC-2023-0053 are pending a webpki update that is tracked by https://github.com/apollographql/router/issues/3645 # and will be fixed by https://github.com/apollographql/router/pull/3643 -ignore = ["RUSTSEC-2020-0159", "RUSTSEC-2020-0071", "RUSTSEC-2023-0053", "RUSTSEC-2023-0052"] +ignore = ["RUSTSEC-2023-0053", "RUSTSEC-2023-0052"] # This section is considered when running `cargo deny check licenses` # More documentation for the licenses section can be found here: From 8e86ef41a3b5eef7f528fc827eeb227f33c27445 Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Tue, 5 Sep 2023 11:34:50 +0100 Subject: [PATCH 20/50] provide a rhai interface to the router service (#3234) Adds `Rhai` support for the `router_service`. It is now possible to interact with requests and responses at the `router_service` level from `Rhai`. The functionality is very similar to that provided for interacting with existing services, for example `supergraph_service`. For instance, you may map requests and responses as follows: ``` fn router_service(service) { const request_callback = Fn("process_request"); service.map_request(request_callback); const response_callback = Fn("process_response"); service.map_response(response_callback); } ``` The main difference from existing services is that the router_service is dealing with HTTP Bodies, not well formatted GraphQL objects. This means that the `Request.body` or `Response.body` is not a well structured object that you may interact with, but is simply a String. This makes it more complex to deal with Request and Response bodies with the tradeoff being that a script author has more power and can perform tasks which are just not possible within the confines of a well-formed GraphQL object. 
This simple example, simply logs the bodies: ``` // Generate a log for each request at this stage fn process_request(request) { print(`body: ${request.body}`); } // Generate a log for each response at this stage fn process_response(response) { print(`body: ${response.body}`); } ``` Fixes #2278 --------- Co-authored-by: Geoffroy Couprie Co-authored-by: Bryn Cooke --- .../feat_garypen_2278_rhai_router_service.md | 43 ++ apollo-router/src/plugins/rhai/engine.rs | 384 ++++++++++++++- apollo-router/src/plugins/rhai/execution.rs | 62 ++- apollo-router/src/plugins/rhai/mod.rs | 450 ++++++++++-------- apollo-router/src/plugins/rhai/router.rs | 70 +++ apollo-router/src/plugins/rhai/subgraph.rs | 59 +++ apollo-router/src/plugins/rhai/supergraph.rs | 62 ++- apollo-router/src/plugins/rhai/tests.rs | 8 +- .../tests/fixtures/test_callbacks.rhai | 14 +- apollo-router/tests/rhai_tests.rs | 5 +- docs/source/customizations/rhai-api.mdx | 29 +- docs/source/customizations/rhai.mdx | 11 +- 12 files changed, 955 insertions(+), 242 deletions(-) create mode 100644 .changesets/feat_garypen_2278_rhai_router_service.md create mode 100644 apollo-router/src/plugins/rhai/router.rs diff --git a/.changesets/feat_garypen_2278_rhai_router_service.md b/.changesets/feat_garypen_2278_rhai_router_service.md new file mode 100644 index 0000000000..c105892e4a --- /dev/null +++ b/.changesets/feat_garypen_2278_rhai_router_service.md @@ -0,0 +1,43 @@ +### Provide a rhai interface to the router service ([Issue #2278](https://github.com/apollographql/router/issues/2278)) + +Adds `Rhai` support for the `router_service`. + +It is now possible to interact with requests and responses at the `router_service` level from `Rhai`. The functionality is very similar to that provided for interacting with existing services, for example `supergraph_service`. For instance, you may map requests and responses as follows: + +```rust +fn router_service(service) { + const request_callback = Fn("process_request"); + service.map_request(request_callback); + const response_callback = Fn("process_response"); + service.map_response(response_callback); +} + +``` +The main difference from existing services is that the router_service is dealing with HTTP Bodies, not well formatted GraphQL objects. This means that the `Request.body` or `Response.body` is not a well structured object that you may interact with, but is simply a String. + +This makes it more complex to deal with Request and Response bodies with the tradeoff being that a script author has more power and can perform tasks which are just not possible within the confines of a well-formed GraphQL object. + +This simple example, simply logs the bodies: + +```rust +// Generate a log for each request at this stage +fn process_request(request) { + print(`body: ${request.body}`); +} + +// Generate a log for each response at this stage +fn process_response(response) { + print(`body: ${response.body}`); +} +``` + +This PR also introduces two new Rhai functions: + +```rust +json_encode(Object) +json_decode(String) -> Object + +``` +Which will respectively encode a `Rhai` Object or decode a JSON string into a `Rhai` Object. These functions may be helpful when dealing with String bodies which represent encoded JSON objects. 
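For instance, a callback dealing with such a body might decode it, inspect a field, and re-encode it. The sketch below is illustrative only and not part of this change set: `payload` is a hypothetical stand-in for a String body carrying encoded JSON, and the try/catch reflects that both functions can fail.

```rhai
fn process_response(response) {
    // Hypothetical stand-in for a String body containing encoded JSON.
    let payload = `{"data":{"dog":{"name":"Spot"}}}`;
    try {
        // json_decode throws on malformed input, so keep it inside try/catch.
        let decoded = json_decode(payload);
        print(`dog name: ${decoded.data.dog.name}`);
        // json_encode turns the Rhai object map back into a JSON string.
        print(`round trip: ${json_encode(decoded)}`);
    }
    catch(err) {
        log_error(`json coding error: ${err}`);
    }
}
```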
+ +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/3234 diff --git a/apollo-router/src/plugins/rhai/engine.rs b/apollo-router/src/plugins/rhai/engine.rs index 135dda4556..ab1dd67194 100644 --- a/apollo-router/src/plugins/rhai/engine.rs +++ b/apollo-router/src/plugins/rhai/engine.rs @@ -6,6 +6,7 @@ use std::time::SystemTime; use base64::prelude::BASE64_STANDARD; use base64::Engine as _; +use bytes::Bytes; use http::header::InvalidHeaderName; use http::uri::Authority; use http::uri::Parts; @@ -30,6 +31,7 @@ use tower::BoxError; use uuid::Uuid; use super::execution; +use super::router; use super::subgraph; use super::supergraph; use super::Rhai; @@ -293,6 +295,16 @@ mod router_json { pub(crate) fn value_to_string(x: &mut Value) -> String { format!("{x:?}") } + + #[rhai_fn(pure, return_raw)] + pub(crate) fn json_encode(input: &mut Dynamic) -> Result> { + serde_json::to_string(input).map_err(|e| e.to_string().into()) + } + + #[rhai_fn(pure, return_raw)] + pub(crate) fn json_decode(input: &mut ImmutableString) -> Result> { + serde_json::from_str(input).map_err(|e| e.to_string().into()) + } } #[export_module] @@ -350,7 +362,67 @@ mod router_context { format!("{x:?}") } + #[rhai_fn(get = "context", pure, return_raw)] + pub(crate) fn router_first_response_context_get( + obj: &mut SharedMut, + ) -> Result> { + Ok(obj.with_mut(|response| response.context.clone())) + } + #[rhai_fn(set = "context", return_raw)] + pub(crate) fn router_first_response_context_set( + obj: &mut SharedMut, + context: Context, + ) -> Result<(), Box> { + obj.with_mut(|response| response.context = context); + Ok(()) + } + + #[rhai_fn(get = "context", pure, return_raw)] + pub(crate) fn supergraph_first_response_context_get( + obj: &mut SharedMut, + ) -> Result> { + Ok(obj.with_mut(|response| response.context.clone())) + } + #[rhai_fn(set = "context", return_raw)] + pub(crate) fn supergraph_first_response_context_set( + obj: &mut SharedMut, + context: Context, + ) -> Result<(), Box> { + obj.with_mut(|response| response.context = context); + Ok(()) + } + + #[rhai_fn(get = "context", pure, return_raw)] + pub(crate) fn execution_first_response_context_get( + obj: &mut SharedMut, + ) -> Result> { + Ok(obj.with_mut(|response| response.context.clone())) + } + #[rhai_fn(set = "context", return_raw)] + pub(crate) fn execution_first_response_context_set( + obj: &mut SharedMut, + context: Context, + ) -> Result<(), Box> { + obj.with_mut(|response| response.context = context); + Ok(()) + } + // Add context getter/setters for deferred responses + #[rhai_fn(get = "context", pure, return_raw)] + pub(crate) fn router_deferred_response_context_get( + obj: &mut SharedMut, + ) -> Result> { + Ok(obj.with_mut(|response| response.context.clone())) + } + #[rhai_fn(set = "context", return_raw)] + pub(crate) fn router_deferred_response_context_set( + obj: &mut SharedMut, + context: Context, + ) -> Result<(), Box> { + obj.with_mut(|response| response.context = context); + Ok(()) + } + #[rhai_fn(get = "context", pure, return_raw)] pub(crate) fn supergraph_deferred_response_context_get( obj: &mut SharedMut, @@ -468,22 +540,48 @@ mod router_plugin { } // End of SubgraphRequest specific section + #[rhai_fn(get = "headers", pure, return_raw)] + pub(crate) fn get_originating_headers_router_response( + obj: &mut SharedMut, + ) -> Result> { + Ok(obj.with_mut(|response| response.response.headers().clone())) + } + + #[rhai_fn(name = "is_primary", pure)] + pub(crate) fn router_response_is_primary(_obj: &mut SharedMut) -> 
bool { + true + } + + #[rhai_fn(get = "headers", pure, return_raw)] + pub(crate) fn get_originating_headers_router_deferred_response( + _obj: &mut SharedMut, + ) -> Result> { + Err(CANNOT_ACCESS_HEADERS_ON_A_DEFERRED_RESPONSE.into()) + } + + #[rhai_fn(name = "is_primary", pure)] + pub(crate) fn router_deferred_response_is_primary( + _obj: &mut SharedMut, + ) -> bool { + false + } + #[rhai_fn(get = "headers", pure, return_raw)] pub(crate) fn get_originating_headers_supergraph_response( - obj: &mut SharedMut, + obj: &mut SharedMut, ) -> Result> { Ok(obj.with_mut(|response| response.response.headers().clone())) } #[rhai_fn(name = "is_primary", pure)] pub(crate) fn supergraph_response_is_primary( - _obj: &mut SharedMut, + _obj: &mut SharedMut, ) -> bool { true } #[rhai_fn(get = "headers", pure, return_raw)] - pub(crate) fn get_originating_headers_router_deferred_response( + pub(crate) fn get_originating_headers_supergraph_deferred_response( _obj: &mut SharedMut, ) -> Result> { Err(CANNOT_ACCESS_HEADERS_ON_A_DEFERRED_RESPONSE.into()) @@ -498,13 +596,15 @@ mod router_plugin { #[rhai_fn(get = "headers", pure, return_raw)] pub(crate) fn get_originating_headers_execution_response( - obj: &mut SharedMut, + obj: &mut SharedMut, ) -> Result> { Ok(obj.with_mut(|response| response.response.headers().clone())) } #[rhai_fn(name = "is_primary", pure)] - pub(crate) fn execution_response_is_primary(_obj: &mut SharedMut) -> bool { + pub(crate) fn execution_response_is_primary( + _obj: &mut SharedMut, + ) -> bool { true } @@ -529,16 +629,24 @@ mod router_plugin { Ok(obj.with_mut(|response| response.response.headers().clone())) } + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided + #[rhai_fn(get = "body", pure, return_raw)] + pub(crate) fn get_originating_body_router_response( + obj: &mut SharedMut, + ) -> Result, Box> { + Ok(obj.with_mut(|response| response.response.body().to_vec())) + }*/ + #[rhai_fn(get = "body", pure, return_raw)] pub(crate) fn get_originating_body_supergraph_response( - obj: &mut SharedMut, + obj: &mut SharedMut, ) -> Result> { Ok(obj.with_mut(|response| response.response.body().clone())) } #[rhai_fn(get = "body", pure, return_raw)] pub(crate) fn get_originating_body_execution_response( - obj: &mut SharedMut, + obj: &mut SharedMut, ) -> Result> { Ok(obj.with_mut(|response| response.response.body().clone())) } @@ -550,8 +658,24 @@ mod router_plugin { Ok(obj.with_mut(|response| response.response.body().clone())) } + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided #[rhai_fn(get = "body", pure, return_raw)] pub(crate) fn get_originating_body_router_deferred_response( + obj: &mut SharedMut, + ) -> Result> { + // Get the body + let bytes = obj.with_mut(|response| { + let bytes = std::mem::take(&mut response.response); + // Copy back the response so it can continue to be used + response.response = bytes.clone(); + Ok::>(bytes) + })?; + + String::from_utf8(bytes.to_vec()).map_err(|err| err.to_string().into()) + }*/ + + #[rhai_fn(get = "body", pure, return_raw)] + pub(crate) fn get_originating_body_supergraph_deferred_response( obj: &mut SharedMut, ) -> Result> { Ok(obj.with_mut(|response| response.response.clone())) @@ -565,8 +689,8 @@ mod router_plugin { } #[rhai_fn(set = "headers", return_raw)] - pub(crate) fn set_originating_headers_supergraph_response( - obj: &mut SharedMut, + pub(crate) fn set_originating_headers_router_response( + obj: &mut SharedMut, headers: HeaderMap, ) -> Result<(), Box> { obj.with_mut(|response| 
*response.response.headers_mut() = headers); @@ -575,6 +699,23 @@ mod router_plugin { #[rhai_fn(set = "headers", return_raw)] pub(crate) fn set_originating_headers_router_deferred_response( + _obj: &mut SharedMut, + _headers: HeaderMap, + ) -> Result<(), Box> { + Err(CANNOT_ACCESS_HEADERS_ON_A_DEFERRED_RESPONSE.into()) + } + + #[rhai_fn(set = "headers", return_raw)] + pub(crate) fn set_originating_headers_supergraph_response( + obj: &mut SharedMut, + headers: HeaderMap, + ) -> Result<(), Box> { + obj.with_mut(|response| *response.response.headers_mut() = headers); + Ok(()) + } + + #[rhai_fn(set = "headers", return_raw)] + pub(crate) fn set_originating_headers_supergraph_deferred_response( _obj: &mut SharedMut, _headers: HeaderMap, ) -> Result<(), Box> { @@ -583,7 +724,7 @@ mod router_plugin { #[rhai_fn(set = "headers", return_raw)] pub(crate) fn set_originating_headers_execution_response( - obj: &mut SharedMut, + obj: &mut SharedMut, headers: HeaderMap, ) -> Result<(), Box> { obj.with_mut(|response| *response.response.headers_mut() = headers); @@ -607,9 +748,20 @@ mod router_plugin { Ok(()) } + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided + #[rhai_fn(set = "body", return_raw)] + pub(crate) fn set_originating_body_router_response( + obj: &mut SharedMut, + body: String, + ) -> Result<(), Box> { + let bytes = Bytes::from(body); + obj.with_mut(|response| *response.response.body_mut() = bytes); + Ok(()) + }*/ + #[rhai_fn(set = "body", return_raw)] pub(crate) fn set_originating_body_supergraph_response( - obj: &mut SharedMut, + obj: &mut SharedMut, body: Response, ) -> Result<(), Box> { obj.with_mut(|response| *response.response.body_mut() = body); @@ -618,7 +770,7 @@ mod router_plugin { #[rhai_fn(set = "body", return_raw)] pub(crate) fn set_originating_body_execution_response( - obj: &mut SharedMut, + obj: &mut SharedMut, body: Response, ) -> Result<(), Box> { obj.with_mut(|response| *response.response.body_mut() = body); @@ -634,8 +786,19 @@ mod router_plugin { Ok(()) } + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided #[rhai_fn(set = "body", return_raw)] pub(crate) fn set_originating_body_router_deferred_response( + obj: &mut SharedMut, + body: String, + ) -> Result<(), Box> { + let bytes = Bytes::from(body); + obj.with_mut(|response| response.response = bytes); + Ok(()) + }*/ + + #[rhai_fn(set = "body", return_raw)] + pub(crate) fn set_originating_body_supergraph_deferred_response( obj: &mut SharedMut, body: Response, ) -> Result<(), Box> { @@ -678,7 +841,18 @@ mod router_plugin { } #[rhai_fn(name = "headers_are_available", pure)] - pub(crate) fn supergraph_response(_: &mut SharedMut) -> bool { + pub(crate) fn router_response(_: &mut SharedMut) -> bool { + true + } + + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided + #[rhai_fn(name = "headers_are_available", pure)] + pub(crate) fn router_deferred_response(_: &mut SharedMut) -> bool { + false + }*/ + + #[rhai_fn(name = "headers_are_available", pure)] + pub(crate) fn supergraph_response(_: &mut SharedMut) -> bool { true } @@ -690,7 +864,7 @@ mod router_plugin { } #[rhai_fn(name = "headers_are_available", pure)] - pub(crate) fn execution_response(_: &mut SharedMut) -> bool { + pub(crate) fn execution_response(_: &mut SharedMut) -> bool { true } @@ -934,15 +1108,29 @@ mod router_plugin { } #[derive(Default)] -pub(crate) struct RhaiExecutionResponse { +pub(crate) struct RhaiRouterFirstRequest { pub(crate) context: Context, - 
pub(crate) response: http_ext::Response, + pub(crate) request: http::Request<()>, } +#[allow(dead_code)] #[derive(Default)] -pub(crate) struct RhaiExecutionDeferredResponse { +pub(crate) struct RhaiRouterChunkedRequest { pub(crate) context: Context, - pub(crate) response: Response, + pub(crate) request: Bytes, +} + +#[derive(Default)] +pub(crate) struct RhaiRouterResponse { + pub(crate) context: Context, + pub(crate) response: http::Response<()>, +} + +#[allow(dead_code)] +#[derive(Default)] +pub(crate) struct RhaiRouterChunkedResponse { + pub(crate) context: Context, + pub(crate) response: Bytes, } #[derive(Default)] @@ -957,6 +1145,18 @@ pub(crate) struct RhaiSupergraphDeferredResponse { pub(crate) response: Response, } +#[derive(Default)] +pub(crate) struct RhaiExecutionResponse { + pub(crate) context: Context, + pub(crate) response: http_ext::Response, +} + +#[derive(Default)] +pub(crate) struct RhaiExecutionDeferredResponse { + pub(crate) context: Context, + pub(crate) response: Response, +} + macro_rules! if_subgraph { ( subgraph => $subgraph: block else $not_subgraph: block ) => { $subgraph @@ -966,6 +1166,152 @@ macro_rules! if_subgraph { }; } +macro_rules! register_rhai_router_interface { + ($engine: ident, $($base: ident), *) => { + $( + // Context stuff + $engine.register_get( + "context", + |obj: &mut SharedMut<$base::FirstRequest>| -> Result> { + Ok(obj.with_mut(|request| request.context.clone())) + } + ) + .register_get( + "context", + |obj: &mut SharedMut<$base::ChunkedRequest>| -> Result> { + Ok(obj.with_mut(|request| request.context.clone())) + } + ).register_get( + "context", + |obj: &mut SharedMut<$base::Response>| -> Result> { + Ok(obj.with_mut(|response| response.context.clone())) + } + ) + .register_get( + "context", + |obj: &mut SharedMut<$base::DeferredResponse>| -> Result> { + Ok(obj.with_mut(|response| response.context.clone())) + } + ); + + $engine.register_set( + "context", + |obj: &mut SharedMut<$base::FirstRequest>, context: Context| { + obj.with_mut(|request| request.context = context); + Ok(()) + } + ) + .register_set( + "context", + |obj: &mut SharedMut<$base::ChunkedRequest>, context: Context| { + obj.with_mut(|request| request.context = context); + Ok(()) + } + ) + .register_set( + "context", + |obj: &mut SharedMut<$base::Response>, context: Context| { + obj.with_mut(|response| response.context = context); + Ok(()) + } + ).register_set( + "context", + |obj: &mut SharedMut<$base::DeferredResponse>, context: Context| { + obj.with_mut(|response| response.context = context); + Ok(()) + } + ); + + // Originating Request + $engine.register_get( + "headers", + |obj: &mut SharedMut<$base::FirstRequest>| -> Result> { + Ok(obj.with_mut(|request| request.request.headers().clone())) + } + ).register_get( + "headers", + |obj: &mut SharedMut<$base::Response>| -> Result> { + Ok(obj.with_mut(|response| response.response.headers().clone())) + } + ); + + $engine.register_set( + "headers", + |obj: &mut SharedMut<$base::FirstRequest>, headers: HeaderMap| { + if_subgraph! { + $base => { + let _unused = (obj, headers); + Err("cannot mutate originating request on a subgraph".into()) + } else { + obj.with_mut(|request| *request.request.headers_mut() = headers); + Ok(()) + } + } + } + ).register_set( + "headers", + |obj: &mut SharedMut<$base::Response>, headers: HeaderMap| { + if_subgraph! 
{ + $base => { + let _unused = (obj, headers); + Err("cannot mutate originating request on a subgraph".into()) + } else { + obj.with_mut(|response| *response.response.headers_mut() = headers); + Ok(()) + } + } + } + ); + + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided + $engine.register_get( + "body", + |obj: &mut SharedMut<$base::ChunkedRequest>| -> Result, Box> { + Ok( obj.with_mut(|request| { request.request.to_vec()})) + } + ); + + $engine.register_set( + "body", + |obj: &mut SharedMut<$base::ChunkedRequest>, body: Vec| { + if_subgraph! { + $base => { + let _unused = (obj, body); + Err("cannot mutate originating request on a subgraph".into()) + } else { + let bytes = Bytes::from(body); + obj.with_mut(|request| request.request = bytes); + Ok(()) + } + } + } + );*/ + + $engine.register_get( + "uri", + |obj: &mut SharedMut<$base::Request>| -> Result> { + Ok(obj.with_mut(|request| request.router_request.uri().clone())) + } + ); + + $engine.register_set( + "uri", + |obj: &mut SharedMut<$base::Request>, uri: Uri| { + if_subgraph! { + $base => { + let _unused = (obj, uri); + Err("cannot mutate originating request on a subgraph".into()) + } else { + obj.with_mut(|request| *request.router_request.uri_mut() = uri); + Ok(()) + } + } + } + ); + )* + }; +} + macro_rules! register_rhai_interface { ($engine: ident, $($base: ident), *) => { $( @@ -1190,6 +1536,8 @@ impl Rhai { tracing::error!(%message, target = %error_main); }); // Add common getter/setters for different types + register_rhai_router_interface!(engine, router); + // Add common getter/setters for different types register_rhai_interface!(engine, supergraph, execution, subgraph); // Since constants in Rhai don't give us the behaviour we expect, let's create some global diff --git a/apollo-router/src/plugins/rhai/execution.rs b/apollo-router/src/plugins/rhai/execution.rs index ea9cfd0d60..a04b00211d 100644 --- a/apollo-router/src/plugins/rhai/execution.rs +++ b/apollo-router/src/plugins/rhai/execution.rs @@ -1,5 +1,65 @@ //! execution module +use std::ops::ControlFlow; + +use tower::BoxError; + +use super::ErrorDetails; +use crate::graphql::Error; pub(crate) use crate::services::execution::*; -pub(crate) type Response = super::engine::RhaiExecutionResponse; +use crate::Context; + +pub(crate) type FirstResponse = super::engine::RhaiExecutionResponse; pub(crate) type DeferredResponse = super::engine::RhaiExecutionDeferredResponse; + +pub(super) fn request_failure( + context: Context, + error_details: ErrorDetails, +) -> Result, BoxError> { + let res = if let Some(body) = error_details.body { + Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build()? + } else { + Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .context(context) + .status_code(error_details.status) + .build()? 
+ }; + + Ok(ControlFlow::Break(res)) +} + +pub(super) fn response_failure(context: Context, error_details: ErrorDetails) -> Response { + if let Some(body) = error_details.body { + Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build() + } else { + Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .status_code(error_details.status) + .context(context) + .build() + } + .expect("can't fail to build our error message") +} diff --git a/apollo-router/src/plugins/rhai/mod.rs b/apollo-router/src/plugins/rhai/mod.rs index 5dce61bccb..2f19f9cf32 100644 --- a/apollo-router/src/plugins/rhai/mod.rs +++ b/apollo-router/src/plugins/rhai/mod.rs @@ -45,22 +45,14 @@ use crate::layers::ServiceBuilderExt; use crate::plugin::Plugin; use crate::plugin::PluginInit; use crate::plugins::rhai::engine::OptionDance; -use crate::plugins::rhai::engine::RhaiExecutionDeferredResponse; -use crate::plugins::rhai::engine::RhaiExecutionResponse; -use crate::plugins::rhai::engine::RhaiSupergraphDeferredResponse; -use crate::plugins::rhai::engine::RhaiSupergraphResponse; use crate::register_plugin; -use crate::services::ExecutionRequest; -use crate::services::ExecutionResponse; -use crate::services::SupergraphRequest; -use crate::services::SupergraphResponse; -use crate::Context; mod engine; pub(crate) const RHAI_SPAN_NAME: &str = "rhai_plugin"; mod execution; +mod router; mod subgraph; mod supergraph; @@ -230,6 +222,24 @@ impl Plugin for Rhai { }) } + fn router_service(&self, service: router::BoxService) -> router::BoxService { + const FUNCTION_NAME_SERVICE: &str = "router_service"; + if !self.ast_has_function(FUNCTION_NAME_SERVICE) { + return service; + } + tracing::debug!("router_service function found"); + let shared_service = Arc::new(Mutex::new(Some(service))); + if let Err(error) = self.run_rhai_service( + FUNCTION_NAME_SERVICE, + None, + ServiceStep::Router(shared_service.clone()), + self.block.load().scope.clone(), + ) { + tracing::error!("service callback failed: {error}"); + } + shared_service.take_unwrap() + } + fn supergraph_service(&self, service: supergraph::BoxService) -> supergraph::BoxService { const FUNCTION_NAME_SERVICE: &str = "supergraph_service"; if !self.ast_has_function(FUNCTION_NAME_SERVICE) { @@ -297,6 +307,7 @@ impl Drop for Rhai { #[derive(Clone, Debug)] pub(crate) enum ServiceStep { + Router(SharedMut), Supergraph(SharedMut), Execution(SharedMut), Subgraph(SharedMut), @@ -318,57 +329,15 @@ macro_rules! gen_map_request { ServiceBuilder::new() .instrument(rhai_service_span()) .checkpoint(move |request: $base::Request| { - // Let's define a local function to build an error response - fn failure_message( - context: Context, - error_details: ErrorDetails, - ) -> Result, BoxError> - { - let res = if let Some(body) = error_details.body { - $base::Response::builder() - .extensions(body.extensions) - .errors(body.errors) - .status_code(error_details.status) - .context(context) - .and_data(body.data) - .and_label(body.label) - .and_path(body.path) - .build() - } else { - $base::Response::error_builder() - .errors(vec![Error { - message: error_details.message.unwrap_or_default(), - ..Default::default() - }]) - .context(context) - .status_code(error_details.status) - .build()? 
- }; - - Ok(ControlFlow::Break(res)) - } let shared_request = Shared::new(Mutex::new(Some(request))); - let result: Result> = if $callback.is_curried() { - $callback.call( - &$rhai_service.engine, - &$rhai_service.ast, - (shared_request.clone(),), - ) - } else { - let mut guard = $rhai_service.scope.lock().unwrap(); - $rhai_service.engine.call_fn( - &mut guard, - &$rhai_service.ast, - $callback.fn_name(), - (shared_request.clone(),), - ) - }; + let result: Result> = + execute(&$rhai_service, &$callback, (shared_request.clone(),)); if let Err(error) = result { let error_details = process_error(error); tracing::error!("map_request callback failed: {error_details:#?}"); let mut guard = shared_request.lock().unwrap(); let request_opt = guard.take(); - return failure_message(request_opt.unwrap().context, error_details); + return $base::request_failure(request_opt.unwrap().context, error_details); } let mut guard = shared_request.lock().unwrap(); let request_opt = guard.take(); @@ -381,49 +350,33 @@ macro_rules! gen_map_request { } // Actually use the checkpoint function so that we can shortcut requests which fail -macro_rules! gen_map_deferred_request { - ($request: ident, $response: ident, $borrow: ident, $rhai_service: ident, $callback: ident) => { +macro_rules! gen_map_router_deferred_request { + ($base: ident, $borrow: ident, $rhai_service: ident, $callback: ident) => { $borrow.replace(|service| { - fn rhai_service_span() -> impl Fn(&$request) -> tracing::Span + Clone { - move |_request: &$request| { + fn rhai_service_span() -> impl Fn(&$base::Request) -> tracing::Span + Clone { + move |_request: &$base::Request| { tracing::info_span!( RHAI_SPAN_NAME, - "rhai service" = stringify!($request), + "rhai service" = stringify!($base::Request), "otel.kind" = "INTERNAL" ) } } ServiceBuilder::new() .instrument(rhai_service_span()) - .checkpoint(move |request: $request| { - // Let's define a local function to build an error response - fn failure_message( - context: Context, - error_details: ErrorDetails, - ) -> Result, BoxError> { - let res = if let Some(body) = error_details.body { - $response::builder() - .extensions(body.extensions) - .errors(body.errors) - .status_code(error_details.status) - .context(context) - .and_data(body.data) - .and_label(body.label) - .and_path(body.path) - .build()? - } else { - $response::error_builder() - .errors(vec![Error { - message: error_details.message.unwrap_or_default(), - ..Default::default() - }]) - .context(context) - .status_code(error_details.status) - .build()? - }; + .checkpoint( move |chunked_request: $base::Request| { + // we split the request stream into headers+first body chunk, then a stream of chunks + // for which we will implement mapping later + let $base::Request { router_request, context } = chunked_request; + let (parts, stream) = router_request.into_parts(); - Ok(ControlFlow::Break(res)) - } + let request = $base::FirstRequest { + context, + request: http::Request::from_parts( + parts, + (), + ), + }; let shared_request = Shared::new(Mutex::new(Some(request))); let result = execute(&$rhai_service, &$callback, (shared_request.clone(),)); @@ -432,76 +385,97 @@ macro_rules! 
gen_map_deferred_request { let error_details = process_error(error); let mut guard = shared_request.lock().unwrap(); let request_opt = guard.take(); - return failure_message(request_opt.unwrap().context, error_details); + return $base::request_failure(request_opt.unwrap().context, error_details); } - let mut guard = shared_request.lock().unwrap(); - let request_opt = guard.take(); - Ok(ControlFlow::Continue(request_opt.unwrap())) + + let request_opt = shared_request.lock().unwrap().take(); + + let $base::FirstRequest { context, request } = + request_opt.unwrap(); + let (parts, _body) = http::Request::from(request).into_parts(); + + // Finally, return a response which has a Body that wraps our stream of response chunks. + Ok(ControlFlow::Continue($base::Request { + context, + router_request: http::Request::from_parts(parts, stream), + })) + + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided + let ctx = context.clone(); + let rhai_service = $rhai_service.clone(); + let callback = $callback.clone(); + + let mapped_stream = stream + .map_err(BoxError::from) + .and_then(move |chunk| { + let context = ctx.clone(); + let rhai_service = rhai_service.clone(); + let callback = callback.clone(); + async move { + let request = $base::ChunkedRequest { + context, + request: chunk.into(), + }; + let shared_request = Shared::new(Mutex::new(Some(request))); + + let result = execute( + &rhai_service, + &callback, + (shared_request.clone(),), + ); + + if let Err(error) = result { + tracing::error!("map_request callback failed: {error}"); + let error_details = process_error(error); + let error = Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }; + // We don't have a structured response to work with here. Let's + // throw away our response and custom build an error response + let error_response = graphql::Response::builder() + .errors(vec![error]).build(); + return Ok(serde_json::to_vec(&error_response)?.into()); + } + + let request_opt = shared_request.lock().unwrap().take(); + let $base::ChunkedRequest { request, .. } = + request_opt.unwrap(); + Ok(request) + } + }); + + // Finally, return a response which has a Body that wraps our stream of response chunks. + Ok(ControlFlow::Continue($base::Request { + context, + router_request: http::Request::from_parts(parts, hyper::Body::wrap_stream(mapped_stream)), + })) + */ }) .service(service) .boxed() }) }; } + macro_rules! gen_map_response { ($base: ident, $borrow: ident, $rhai_service: ident, $callback: ident) => { $borrow.replace(|service| { service .map_response(move |response: $base::Response| { - // Let's define a local function to build an error response - // XXX: This isn't ideal. We already have a response, so ideally we'd - // like to append this error into the existing response. However, - // the significantly different treatment of errors in different - // response types makes this extremely painful. This needs to be - // re-visited at some point post GA. 
- fn failure_message( - context: Context, - error_details: ErrorDetails, - ) -> $base::Response { - if let Some(body) = error_details.body { - $base::Response::builder() - .extensions(body.extensions) - .errors(body.errors) - .status_code(error_details.status) - .context(context) - .and_data(body.data) - .and_label(body.label) - .and_path(body.path) - .build() - } else { - $base::Response::error_builder() - .errors(vec![Error { - message: error_details.message.unwrap_or_default(), - ..Default::default() - }]) - .status_code(error_details.status) - .context(context) - .build() - .expect("can't fail to build our error message") - } - } let shared_response = Shared::new(Mutex::new(Some(response))); - let result: Result> = if $callback.is_curried() { - $callback.call( - &$rhai_service.engine, - &$rhai_service.ast, - (shared_response.clone(),), - ) - } else { - let mut guard = $rhai_service.scope.lock().unwrap(); - $rhai_service.engine.call_fn( - &mut guard, - &$rhai_service.ast, - $callback.fn_name(), - (shared_response.clone(),), - ) - }; + let result: Result> = + execute(&$rhai_service, &$callback, (shared_response.clone(),)); + if let Err(error) = result { tracing::error!("map_response callback failed: {error}"); let error_details = process_error(error); let mut guard = shared_response.lock().unwrap(); let response_opt = guard.take(); - return failure_message(response_opt.unwrap().context, error_details); + return $base::response_failure( + response_opt.unwrap().context, + error_details, + ); } let mut guard = shared_response.lock().unwrap(); let response_opt = guard.take(); @@ -512,46 +486,122 @@ macro_rules! gen_map_response { }; } -macro_rules! gen_map_deferred_response { - ($response: ident, $rhai_response: ident, $rhai_deferred_response: ident, $borrow: ident, $rhai_service: ident, $callback: ident) => { +// Even though this macro is only ever used to generate router service handling, I'm leaving it as +// a macro so that the code shape is "similar" to the way in which other services are processed. +// +// I can't easily unify the macros because the router response processing is quite different to +// other service in terms of payload. +macro_rules! gen_map_router_deferred_response { + ($base: ident, $borrow: ident, $rhai_service: ident, $callback: ident) => { $borrow.replace(|service| { BoxService::new(service.and_then( - |mapped_response: $response| async move { - // Let's define a local function to build an error response - // XXX: This isn't ideal. We already have a response, so ideally we'd - // like to append this error into the existing response. However, - // the significantly different treatment of errors in different - // response types makes this extremely painful. This needs to be - // re-visited at some point post GA. 
- fn failure_message( - context: Context, - error_details: ErrorDetails, - ) -> $response { - if let Some(body) = error_details.body { - $response::builder() - .extensions(body.extensions) - .errors(body.errors) - .status_code(error_details.status) - .context(context) - .and_data(body.data) - .and_label(body.label) - .and_path(body.path) - .build() - } else { - $response::error_builder() - .errors(vec![Error { + |mapped_response: $base::Response| async move { + // we split the response stream into headers+first response, then a stream of deferred responses + // for which we will implement mapping later + let $base::Response { response, context } = mapped_response; + let (parts, stream) = response.into_parts(); + + let response = $base::FirstResponse { + context, + response: http::Response::from_parts( + parts, + (), + ) + .into(), + }; + let shared_response = Shared::new(Mutex::new(Some(response))); + + let result = + execute(&$rhai_service, &$callback, (shared_response.clone(),)); + if let Err(error) = result { + tracing::error!("map_response callback failed: {error}"); + let error_details = process_error(error); + let response_opt = shared_response.lock().unwrap().take(); + return Ok($base::response_failure( + response_opt.unwrap().context, + error_details + )); + } + + let response_opt = shared_response.lock().unwrap().take(); + + let $base::FirstResponse { context, response } = + response_opt.unwrap(); + let (parts, _body) = http::Response::from(response).into_parts(); + + + // Finally, return a response which has a Body that wraps our stream of response chunks. + Ok($base::Response { + context, + response: http::Response::from_parts(parts, stream), + }) + + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided + let ctx = context.clone(); + + let mapped_stream = rest + .map_err(BoxError::from) + .and_then(move |deferred_response| { + let rhai_service = $rhai_service.clone(); + let context = ctx.clone(); + let callback = $callback.clone(); + async move { + let response = $base::DeferredResponse { + context, + response: deferred_response.into(), + }; + let shared_response = Shared::new(Mutex::new(Some(response))); + + let result = execute( + &rhai_service, + &callback, + (shared_response.clone(),), + ); + + if let Err(error) = result { + tracing::error!("map_response callback failed: {error}"); + let error_details = process_error(error); + let error = Error { message: error_details.message.unwrap_or_default(), ..Default::default() - }]) - .status_code(error_details.status) - .context(context) - .build() - }.expect("can't fail to build our error message") - } + }; + // We don't have a structured response to work with here. Let's + // throw away our response and custom build an error response + let error_response = graphql::Response::builder() + .errors(vec![error]).build(); + return Ok(serde_json::to_vec(&error_response)?.into()); + } + + let response_opt = shared_response.lock().unwrap().take(); + let $base::DeferredResponse { response, .. } = + response_opt.unwrap(); + Ok(response) + } + }); + + // Create our response stream which consists of the bytes from our first body chained with the + // rest of the responses in our mapped stream. + let final_stream = once(ready(Ok(body))).chain(mapped_stream).boxed(); + + // Finally, return a response which has a Body that wraps our stream of response chunks. 
+ Ok($base::Response { + context, + response: http::Response::from_parts(parts, hyper::Body::wrap_stream(final_stream)), + })*/ + }, + )) + }) + }; +} +macro_rules! gen_map_deferred_response { + ($base: ident, $borrow: ident, $rhai_service: ident, $callback: ident) => { + $borrow.replace(|service| { + BoxService::new(service.and_then( + |mapped_response: $base::Response| async move { // we split the response stream into headers+first response, then a stream of deferred responses // for which we will implement mapping later - let $response { response, context } = mapped_response; + let $base::Response { response, context } = mapped_response; let (parts, stream) = response.into_parts(); let (first, rest) = stream.into_future().await; @@ -562,13 +612,13 @@ macro_rules! gen_map_deferred_response { position: None, body: None }; - return Ok(failure_message( + return Ok($base::response_failure( context, error_details )); } - let response = $rhai_response { + let response = $base::FirstResponse { context, response: http::Response::from_parts( parts, @@ -585,7 +635,7 @@ macro_rules! gen_map_deferred_response { let error_details = process_error(error); let mut guard = shared_response.lock().unwrap(); let response_opt = guard.take(); - return Ok(failure_message( + return Ok($base::response_failure( response_opt.unwrap().context, error_details )); @@ -593,7 +643,7 @@ macro_rules! gen_map_deferred_response { let mut guard = shared_response.lock().unwrap(); let response_opt = guard.take(); - let $rhai_response { context, response } = + let $base::FirstResponse { context, response } = response_opt.unwrap(); let (parts, body) = http::Response::from(response).into_parts(); @@ -604,7 +654,7 @@ macro_rules! gen_map_deferred_response { let context = context.clone(); let callback = $callback.clone(); async move { - let response = $rhai_deferred_response { + let response = $base::DeferredResponse { context, response: deferred_response, }; @@ -620,7 +670,7 @@ macro_rules! gen_map_deferred_response { let error_details = process_error(error); let mut guard = shared_response.lock().unwrap(); let response_opt = guard.take(); - let $rhai_deferred_response { mut response, .. } = response_opt.unwrap(); + let $base::DeferredResponse { mut response, .. } = response_opt.unwrap(); let error = Error { message: error_details.message.unwrap_or_default(), ..Default::default() @@ -631,7 +681,7 @@ macro_rules! gen_map_deferred_response { let mut guard = shared_response.lock().unwrap(); let response_opt = guard.take(); - let $rhai_deferred_response { response, .. } = + let $base::DeferredResponse { response, .. } = response_opt.unwrap(); Some(response) } @@ -642,7 +692,7 @@ macro_rules! gen_map_deferred_response { once(ready(body)).chain(mapped_stream).boxed(), ) .into(); - Ok($response { + Ok($base::Response { context: ctx, response, }) @@ -655,23 +705,14 @@ macro_rules! 
gen_map_deferred_response { impl ServiceStep { fn map_request(&mut self, rhai_service: RhaiService, callback: FnPtr) { match self { + ServiceStep::Router(service) => { + gen_map_router_deferred_request!(router, service, rhai_service, callback); + } ServiceStep::Supergraph(service) => { - gen_map_deferred_request!( - SupergraphRequest, - SupergraphResponse, - service, - rhai_service, - callback - ); + gen_map_request!(supergraph, service, rhai_service, callback); } ServiceStep::Execution(service) => { - gen_map_deferred_request!( - ExecutionRequest, - ExecutionResponse, - service, - rhai_service, - callback - ); + gen_map_request!(execution, service, rhai_service, callback); } ServiceStep::Subgraph(service) => { gen_map_request!(subgraph, service, rhai_service, callback); @@ -681,25 +722,14 @@ impl ServiceStep { fn map_response(&mut self, rhai_service: RhaiService, callback: FnPtr) { match self { + ServiceStep::Router(service) => { + gen_map_router_deferred_response!(router, service, rhai_service, callback); + } ServiceStep::Supergraph(service) => { - gen_map_deferred_response!( - SupergraphResponse, - RhaiSupergraphResponse, - RhaiSupergraphDeferredResponse, - service, - rhai_service, - callback - ); + gen_map_deferred_response!(supergraph, service, rhai_service, callback); } ServiceStep::Execution(service) => { - gen_map_deferred_response!( - ExecutionResponse, - RhaiExecutionResponse, - RhaiExecutionDeferredResponse, - service, - rhai_service, - callback - ); + gen_map_deferred_response!(execution, service, rhai_service, callback); } ServiceStep::Subgraph(service) => { gen_map_response!(subgraph, service, rhai_service, callback); diff --git a/apollo-router/src/plugins/rhai/router.rs b/apollo-router/src/plugins/rhai/router.rs new file mode 100644 index 0000000000..2a6313daf2 --- /dev/null +++ b/apollo-router/src/plugins/rhai/router.rs @@ -0,0 +1,70 @@ +//! router module + +use std::ops::ControlFlow; + +use tower::BoxError; + +use super::ErrorDetails; +use crate::graphql::Error; +pub(crate) use crate::services::router::*; +use crate::Context; + +pub(crate) type FirstRequest = super::engine::RhaiRouterFirstRequest; +pub(crate) type ChunkedRequest = super::engine::RhaiRouterChunkedRequest; +pub(crate) type FirstResponse = super::engine::RhaiRouterResponse; +pub(crate) type DeferredResponse = super::engine::RhaiRouterChunkedResponse; + +pub(super) fn request_failure( + context: Context, + error_details: ErrorDetails, +) -> Result, BoxError> { + let res = if let Some(body) = error_details.body { + crate::services::router::Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build()? + } else { + crate::services::router::Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .context(context) + .status_code(error_details.status) + .build()? 
+ }; + + Ok(ControlFlow::Break(res)) +} + +pub(super) fn response_failure( + context: Context, + error_details: ErrorDetails, +) -> crate::services::router::Response { + if let Some(body) = error_details.body { + crate::services::router::Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build() + } else { + crate::services::router::Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .status_code(error_details.status) + .context(context) + .build() + } + .expect("can't fail to build our error message") +} diff --git a/apollo-router/src/plugins/rhai/subgraph.rs b/apollo-router/src/plugins/rhai/subgraph.rs index 22da7cc63b..110dce38d5 100644 --- a/apollo-router/src/plugins/rhai/subgraph.rs +++ b/apollo-router/src/plugins/rhai/subgraph.rs @@ -1,3 +1,62 @@ //! subgraph module +use std::ops::ControlFlow; + +use tower::BoxError; + +use super::ErrorDetails; +use crate::graphql::Error; pub(crate) use crate::services::subgraph::*; +use crate::Context; + +pub(super) fn request_failure( + context: Context, + error_details: ErrorDetails, +) -> Result, BoxError> { + let res = if let Some(body) = error_details.body { + Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build() + } else { + Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .context(context) + .status_code(error_details.status) + .build()? + }; + + Ok(ControlFlow::Break(res)) +} + +pub(super) fn response_failure(context: Context, error_details: ErrorDetails) -> Response { + if let Some(body) = error_details.body { + Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build() + } else { + Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .status_code(error_details.status) + .context(context) + .build() + .expect("can't fail to build our error message") + } +} diff --git a/apollo-router/src/plugins/rhai/supergraph.rs b/apollo-router/src/plugins/rhai/supergraph.rs index 01cc448b96..9f2905ab62 100644 --- a/apollo-router/src/plugins/rhai/supergraph.rs +++ b/apollo-router/src/plugins/rhai/supergraph.rs @@ -1,5 +1,65 @@ //! supergraph module +use std::ops::ControlFlow; + +use tower::BoxError; + +use super::ErrorDetails; +use crate::graphql::Error; pub(crate) use crate::services::supergraph::*; -pub(crate) type Response = super::engine::RhaiSupergraphResponse; +use crate::Context; + +pub(crate) type FirstResponse = super::engine::RhaiSupergraphResponse; pub(crate) type DeferredResponse = super::engine::RhaiSupergraphDeferredResponse; + +pub(super) fn request_failure( + context: Context, + error_details: ErrorDetails, +) -> Result, BoxError> { + let res = if let Some(body) = error_details.body { + Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build()? 
+ } else { + Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .context(context) + .status_code(error_details.status) + .build()? + }; + + Ok(ControlFlow::Break(res)) +} + +pub(super) fn response_failure(context: Context, error_details: ErrorDetails) -> Response { + if let Some(body) = error_details.body { + Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build() + } else { + Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .status_code(error_details.status) + .context(context) + .build() + } + .expect("can't fail to build our error message") +} diff --git a/apollo-router/src/plugins/rhai/tests.rs b/apollo-router/src/plugins/rhai/tests.rs index a2e5fee18e..bb56ceb654 100644 --- a/apollo-router/src/plugins/rhai/tests.rs +++ b/apollo-router/src/plugins/rhai/tests.rs @@ -21,16 +21,16 @@ use super::process_error; use super::subgraph; use super::PathBuf; use super::Rhai; -use super::RhaiExecutionDeferredResponse; -use super::RhaiExecutionResponse; -use super::RhaiSupergraphDeferredResponse; -use super::RhaiSupergraphResponse; use crate::graphql::Error; use crate::graphql::Request; use crate::http_ext; use crate::plugin::test::MockExecutionService; use crate::plugin::test::MockSupergraphService; use crate::plugin::DynPlugin; +use crate::plugins::rhai::engine::RhaiExecutionDeferredResponse; +use crate::plugins::rhai::engine::RhaiExecutionResponse; +use crate::plugins::rhai::engine::RhaiSupergraphDeferredResponse; +use crate::plugins::rhai::engine::RhaiSupergraphResponse; use crate::services::ExecutionRequest; use crate::services::SubgraphRequest; use crate::services::SupergraphRequest; diff --git a/apollo-router/tests/fixtures/test_callbacks.rhai b/apollo-router/tests/fixtures/test_callbacks.rhai index 340cde9d5e..7ded5601d9 100644 --- a/apollo-router/tests/fixtures/test_callbacks.rhai +++ b/apollo-router/tests/fixtures/test_callbacks.rhai @@ -1,7 +1,7 @@ // This is a test used to make sure each callback is called -fn supergraph_service(service) { - log_info("supergraph_service setup"); +fn router_service(service) { + log_info("router_service setup"); service.map_request(|request| { log_info("from_router_request"); }); @@ -10,6 +10,16 @@ fn supergraph_service(service) { }); } +fn supergraph_service(service) { + log_info("supergraph_service setup"); + service.map_request(|request| { + log_info("from_supergraph_request"); + }); + service.map_response(|response| { + log_info("from_supergraph_response"); + }); +} + fn execution_service(service) { log_info("execution_service setup"); service.map_request(|request| { diff --git a/apollo-router/tests/rhai_tests.rs b/apollo-router/tests/rhai_tests.rs index e1578ae423..d8ff1aecf9 100644 --- a/apollo-router/tests/rhai_tests.rs +++ b/apollo-router/tests/rhai_tests.rs @@ -46,9 +46,12 @@ async fn all_rhai_callbacks_are_invoked() { .unwrap(); dbg!(_response); for expected_log in [ - "supergraph_service setup", + "router_service setup", "from_router_request", "from_router_response", + "supergraph_service setup", + "from_supergraph_request", + "from_supergraph_response", "execution_service setup", "from_execution_request", "from_execution_response", diff --git a/docs/source/customizations/rhai-api.mdx b/docs/source/customizations/rhai-api.mdx 
index 5496a24da9..83775e96e5 100644 --- a/docs/source/customizations/rhai-api.mdx +++ b/docs/source/customizations/rhai-api.mdx @@ -12,6 +12,7 @@ This article documents symbols and behaviors that are specific to [Rhai customiz Your Rhai script's [main file](./rhai/#the-main-file) hooks into the individual services of the Apollo Router's [request-handling lifecycle](./rhai/#router-request-lifecycle). To do so, it defines whichever combination of the following entry point hooks it requires: ```rhai +fn router_service(service) {} fn supergraph_service(service) {} fn execution_service(service) {} fn subgraph_service(service, subgraph) {} @@ -192,6 +193,31 @@ fn supergraph_service(service) { } ``` +## json encode/decode strings + +Your Rhai customization can use the functions `json_encode()` and `json_decode()` to convert Rhai objects to/from valid JSON encoded strings. Both functions can fail, so always handle exceptions when using them. + +```rhai +fn router_service(service) { + let original = `{"valid":"object"}`; + try { + let encoded = json_decode(original); + // encoded is a Rhai object, with a property (or key) named valid with a String value of "object" + print(`encoded.valid: ${encoded.valid}`); + let and_back = json_encode(encoded); + // and_back will be a string == original. + if and_back != original { + throw "something has gone wrong"; + } + } + catch(err) + { + // log any errors + log_error(`json coding error: ${err}`); + } +} +``` + ## base64 encode/decode strings Your Rhai customization can use the functions `base64::encode()` and `base64::decode()` to encode/decode strings. `encode()` does not fail, but `decode()` can fail, so always handle exceptions when using the `decode()` function. @@ -464,9 +490,10 @@ The following fields are identical in behavior to their `request` counterparts: * [`context`](#requestcontext) * [`headers`](#requestheaders) +* [`body`](#requestbody) * [`body.extensions`](#requestbodyextensions) -Note: Be particularly careful when interacting with headers in a response context. For supergraph_service() and execution_service(), response headers only exist for the first response in a deferred response stream. You can handle this by making use of the `is_primary()` function which will return true if a response is the first (or primary) response. If you do try to access the headers in a non-primary response, then you'll raise an exception which can be handled like any other rhai exception, but is not so convenient as using the `is_primary()` method. +Note: Be particularly careful when interacting with headers in a response context. For router_service(), supergraph_service() and execution_service(), response headers only exist for the first response in a deferred response stream. You can handle this by making use of the `is_primary()` function which will return true if a response is the first (or primary) response. If you do try to access the headers in a non-primary response, then you'll raise an exception which can be handled like any other rhai exception, but is not so convenient as using the `is_primary()` method. 
```rhai if response.is_primary() { diff --git a/docs/source/customizations/rhai.mdx b/docs/source/customizations/rhai.mdx index 157c58a57a..14fe4e62ba 100644 --- a/docs/source/customizations/rhai.mdx +++ b/docs/source/customizations/rhai.mdx @@ -263,8 +263,9 @@ Each hook in your Rhai script's [main file](#main-file) is passed a `service` ob ```mermaid graph LR; client(Client); - client --> supergraph(supergraph_service); + client --> router(router_service); subgraph Apollo Router + router --> supergraph(supergraph_service); supergraph --> execution(execution_service); execution --> subs_a(subgraph_service); execution --> subs_b(subgraph_service); @@ -286,8 +287,9 @@ Each hook in your Rhai script's [main file](#main-file) is passed a `service` ob graph RL; client(Client); subgraph Apollo Router - supergraph(supergraph_service); execution(execution_service); + supergraph(supergraph_service); + router(router_service); subs_a(subgraph_service); subs_b(subgraph_service); end; @@ -298,13 +300,14 @@ Each hook in your Rhai script's [main file](#main-file) is passed a `service` ob subs_a --> execution; subs_b --> execution; execution --> supergraph; - supergraph --> client; + supergraph --> router;; + router --> client; class client,sub_a,sub_b secondary; ``` First, callbacks for `subgraph_service` are each passed the `response` from the corresponding subgraph. - Afterward, callbacks for `execution_service` and then `supergraph_service` are passed the combined `response` for the client that's assembled from all subgraph `response`s. + Afterward, callbacks for `execution_service`, `supergraph_service` and then `router_service` are passed the combined `response` for the client that's assembled from all subgraph `response`s. From 214016cbf8ef19bf489735146365516f3284dadb Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Tue, 5 Sep 2023 12:59:10 +0200 Subject: [PATCH 21/50] add geoffroy to changeset --- .changesets/fix_igni_typename_fragment_interfaces.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changesets/fix_igni_typename_fragment_interfaces.md b/.changesets/fix_igni_typename_fragment_interfaces.md index 634bb740b0..5ed53a9ee9 100644 --- a/.changesets/fix_igni_typename_fragment_interfaces.md +++ b/.changesets/fix_igni_typename_fragment_interfaces.md @@ -2,4 +2,4 @@ Operations would over rely on the presence of __typename to resolve selection sets on interface implementers. This changeset checks for the parent type in an InlineFragment, so we don't drop relevant selection set when applicable. -By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3718 +By [@o0Ignition0o](https://github.com/o0Ignition0o) and [@geal](https://github.com/geal) in https://github.com/apollographql/router/pull/3718 From b58a67c92d65b00c6c11c246e220dce9e2755de1 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 5 Sep 2023 14:30:37 +0200 Subject: [PATCH 22/50] Do not record a trace if telemetry is not configured (#2999) This introduces a `SamplingFilter` that wraps `OpenTelemetryLayer`. The layer has an overhead on every request, because it records data for each span, even if no exporters are set up. The filter handles sampling ahead of the layer, only sending a trace to the layer when it is actually needed, ie when it is sampled, and an exporter was configured. This also reduces the overhead of sampling, by managing it outside of the OpenTelemetryLayer. 
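To make the shape of that filter concrete, here is a deliberately simplified sketch; it is an illustration under stated assumptions rather than the code this patch adds (the real filter decides per trace, as described above, and all names below are made up):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

use rand::Rng;
use tracing::Metadata;
use tracing_subscriber::layer::{Context, Filter};

// 0 means "no exporter configured, never sample"; u64::MAX means "always sample".
static SAMPLING_THRESHOLD: AtomicU64 = AtomicU64::new(0);

// Called when the telemetry configuration is (re)activated.
pub fn configure(ratio: f64) {
    let threshold = (ratio.clamp(0.0, 1.0) * u64::MAX as f64) as u64;
    SAMPLING_THRESHOLD.store(threshold, Ordering::Relaxed);
}

pub struct SamplingFilter;

impl<S> Filter<S> for SamplingFilter {
    fn enabled(&self, meta: &Metadata<'_>, _cx: &Context<'_, S>) -> bool {
        // Only spans are gated in this sketch; events are always let through.
        if !meta.is_span() {
            return true;
        }
        let threshold = SAMPLING_THRESHOLD.load(Ordering::Relaxed);
        // With no exporter configured the threshold stays at 0, so the
        // OpenTelemetry layer never records a single span.
        threshold != 0 && rand::thread_rng().gen::<u64>() <= threshold
    }
}
```

A filter like this would typically be attached to the OpenTelemetry layer via `with_filter` from `tracing_subscriber::Layer`, so span data is only recorded when it has a chance of being exported.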
It is configured through a sampling ratio stored in an atomic u64, that is modifed when the telemetry configuration is activated --- .changesets/fix_geal_filter_events_too.md | 6 + Cargo.lock | 1 + apollo-router/Cargo.toml | 1 + apollo-router/src/plugins/telemetry/mod.rs | 72 ++++++---- apollo-router/src/plugins/telemetry/reload.rs | 136 ++++++++++++++++-- apollo-router/src/router_factory.rs | 6 +- .../fixtures/jaeger-0.5-sample.router.yaml | 29 ++++ apollo-router/tests/jaeger_test.rs | 28 ++++ 8 files changed, 236 insertions(+), 43 deletions(-) create mode 100644 .changesets/fix_geal_filter_events_too.md create mode 100644 apollo-router/tests/fixtures/jaeger-0.5-sample.router.yaml diff --git a/.changesets/fix_geal_filter_events_too.md b/.changesets/fix_geal_filter_events_too.md new file mode 100644 index 0000000000..579d457ea6 --- /dev/null +++ b/.changesets/fix_geal_filter_events_too.md @@ -0,0 +1,6 @@ +### Do not record a trace if telemetry is not configured + +The OpenTelemetry handling code had a constant overhead on every request, due to the OpenTelemetryLayer recording data for every span, even when telemetry is not actually set up. We introduce a sampling filter that disables it entirely when no exporters are configured, which provides a performance boost in basic setups. +It also provides performance gains when exporters are set up: if a sampling ratio or client defined sampling are used, then the filter will only send the sampled traces to the rest of the stack, thus reducing the overhead again. + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2999 diff --git a/Cargo.lock b/Cargo.lock index 705e12df56..fb568ac1a0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -339,6 +339,7 @@ dependencies = [ "opentelemetry-prometheus", "opentelemetry-semantic-conventions", "opentelemetry-zipkin", + "opentelemetry_api", "p256 0.12.0", "parking_lot 0.12.1", "paste", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index c46cefb639..ab458424d5 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -134,6 +134,7 @@ once_cell = "1.18.0" # https://github.com/apollographql/router/pull/1509. A comment which exists # there (and on `tracing` packages below) should be updated should this change. 
opentelemetry = { version = "0.19.0", features = ["rt-tokio", "metrics"] } +opentelemetry_api = "0.19.0" opentelemetry-datadog = { version = "0.7.0", features = ["reqwest-client"] } opentelemetry-http = "0.8.0" opentelemetry-jaeger = { version = "0.18.0", features = [ diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index f6a8ba5202..c96157e4e4 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -48,7 +48,6 @@ use tokio::runtime::Handle; use tower::BoxError; use tower::ServiceBuilder; use tower::ServiceExt; -use tracing_opentelemetry::OpenTelemetryLayer; use tracing_opentelemetry::OpenTelemetrySpanExt; use tracing_subscriber::fmt::format::JsonFields; use tracing_subscriber::Layer; @@ -60,17 +59,19 @@ use self::apollo::SingleReport; use self::apollo_exporter::proto; use self::apollo_exporter::Sender; use self::config::Conf; +use self::config::Sampler; +use self::config::SamplerOption; use self::formatters::text::TextFormatter; use self::metrics::apollo::studio::SingleTypeStat; use self::metrics::AttributesForwardConf; use self::metrics::MetricsAttributesConf; use self::reload::reload_fmt; use self::reload::reload_metrics; -use self::reload::LayeredRegistry; +use self::reload::LayeredTracer; use self::reload::NullFieldFormatter; +use self::reload::SamplingFilter; use self::reload::OPENTELEMETRY_TRACER_HANDLE; use self::tracing::apollo_telemetry::APOLLO_PRIVATE_DURATION_NS; -use self::tracing::reload::ReloadTracer; use crate::axum_factory::utils::REQUEST_SPAN_NAME; use crate::context::OPERATION_NAME; use crate::layers::ServiceBuilderExt; @@ -104,6 +105,7 @@ use crate::plugins::telemetry::utils::TracingUtils; use crate::query_planner::OperationKind; use crate::register_plugin; use crate::router_factory::Endpoint; +use crate::services::apollo_key; use crate::services::execution; use crate::services::router; use crate::services::subgraph; @@ -156,6 +158,7 @@ pub(crate) struct Telemetry { custom_endpoints: MultiMap, apollo_metrics_sender: apollo_exporter::Sender, field_level_instrumentation_ratio: f64, + sampling_filter_ratio: SamplerOption, tracer_provider: Option, meter_provider: AggregateMeterProvider, @@ -241,14 +244,16 @@ impl Plugin for Telemetry { config.calculate_field_level_instrumentation_ratio()?; let mut metrics_builder = Self::create_metrics_builder(&config)?; let meter_provider = metrics_builder.meter_provider(); + let (sampling_filter_ratio, tracer_provider) = Self::create_tracer_provider(&config)?; Ok(Telemetry { custom_endpoints: metrics_builder.custom_endpoints(), metrics_exporters: metrics_builder.exporters(), metrics: BasicMetrics::new(&meter_provider), apollo_metrics_sender: metrics_builder.apollo_metrics_provider(), field_level_instrumentation_ratio, - tracer_provider: Some(Self::create_tracer_provider(&config)?), + tracer_provider: Some(tracer_provider), meter_provider, + sampling_filter_ratio, config: Arc::new(config), }) } @@ -541,6 +546,8 @@ impl Telemetry { // Only apply things if we were executing in the context of a vanilla the Apollo executable. // Users that are rolling their own routers will need to set up telemetry themselves. if let Some(hot_tracer) = OPENTELEMETRY_TRACER_HANDLE.get() { + SamplingFilter::configure(&self.sampling_filter_ratio); + // The reason that this has to happen here is that we are interacting with global state. 
// If we do this logic during plugin init then if a subsequent plugin fails to init then we // will already have set the new tracer provider and we will be in an inconsistent state. @@ -612,20 +619,41 @@ impl Telemetry { fn create_tracer_provider( config: &config::Conf, - ) -> Result { + ) -> Result<(SamplerOption, opentelemetry::sdk::trace::TracerProvider), BoxError> { let tracing_config = config.tracing.clone().unwrap_or_default(); - let trace_config = &tracing_config.trace_config.unwrap_or_default(); - let mut builder = - opentelemetry::sdk::trace::TracerProvider::builder().with_config(trace_config.into()); + let mut trace_config = tracing_config.trace_config.unwrap_or_default(); + let mut sampler = trace_config.sampler; + // set it to AlwaysOn: it is now done in the SamplingFilter, so whatever is sent to an exporter + // should be accepted + trace_config.sampler = SamplerOption::Always(Sampler::AlwaysOn); + + // if APOLLO_KEY was set, the Studio exporter must be active + let apollo_config = if config.apollo.is_none() && apollo_key().is_some() { + Some(Default::default()) + } else { + config.apollo.clone() + }; + + let mut builder = opentelemetry::sdk::trace::TracerProvider::builder() + .with_config((&trace_config).into()); - builder = setup_tracing(builder, &tracing_config.jaeger, trace_config)?; - builder = setup_tracing(builder, &tracing_config.zipkin, trace_config)?; - builder = setup_tracing(builder, &tracing_config.datadog, trace_config)?; - builder = setup_tracing(builder, &tracing_config.otlp, trace_config)?; - builder = setup_tracing(builder, &config.apollo, trace_config)?; + builder = setup_tracing(builder, &tracing_config.jaeger, &trace_config)?; + builder = setup_tracing(builder, &tracing_config.zipkin, &trace_config)?; + builder = setup_tracing(builder, &tracing_config.datadog, &trace_config)?; + builder = setup_tracing(builder, &tracing_config.otlp, &trace_config)?; + builder = setup_tracing(builder, &apollo_config, &trace_config)?; + + if tracing_config.jaeger.is_none() + && tracing_config.zipkin.is_none() + && tracing_config.datadog.is_none() + && tracing_config.otlp.is_none() + && apollo_config.is_none() + { + sampler = SamplerOption::Always(Sampler::AlwaysOff); + } let tracer_provider = builder.build(); - Ok(tracer_provider) + Ok((sampler, tracer_provider)) } fn create_metrics_builder(config: &config::Conf) -> Result { @@ -664,21 +692,7 @@ impl Telemetry { Ok(builder) } - #[allow(clippy::type_complexity)] - fn create_fmt_layer( - config: &config::Conf, - ) -> Box< - dyn Layer< - ::tracing_subscriber::layer::Layered< - OpenTelemetryLayer< - LayeredRegistry, - ReloadTracer<::opentelemetry::sdk::trace::Tracer>, - >, - LayeredRegistry, - >, - > + Send - + Sync, - > { + fn create_fmt_layer(config: &config::Conf) -> Box + Send + Sync> { let logging = &config.logging; let fmt = match logging.format { config::LoggingFormat::Pretty => tracing_subscriber::fmt::layer() diff --git a/apollo-router/src/plugins/telemetry/reload.rs b/apollo-router/src/plugins/telemetry/reload.rs index 50ce48747a..064763405d 100644 --- a/apollo-router/src/plugins/telemetry/reload.rs +++ b/apollo-router/src/plugins/telemetry/reload.rs @@ -1,21 +1,33 @@ +use std::sync::atomic::AtomicU64; +use std::sync::atomic::Ordering; + use anyhow::anyhow; use anyhow::Result; use once_cell::sync::OnceCell; use opentelemetry::metrics::noop::NoopMeterProvider; use opentelemetry::sdk::trace::Tracer; +use opentelemetry::trace::TraceContextExt; use opentelemetry::trace::TracerProvider; +use rand::thread_rng; +use 
rand::Rng; use tower::BoxError; +use tracing_core::Subscriber; use tracing_opentelemetry::OpenTelemetryLayer; +use tracing_subscriber::filter::Filtered; use tracing_subscriber::fmt::FormatFields; +use tracing_subscriber::layer::Filter; use tracing_subscriber::layer::Layer; use tracing_subscriber::layer::Layered; use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::registry::LookupSpan; use tracing_subscriber::reload::Handle; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; use tracing_subscriber::Registry; +use super::config::SamplerOption; use super::metrics::span_metrics_exporter::SpanMetricsLayer; +use crate::axum_factory::utils::REQUEST_SPAN_NAME; use crate::plugins::telemetry::formatters::filter_metric_events; use crate::plugins::telemetry::formatters::text::TextFormatter; use crate::plugins::telemetry::formatters::FilteringFormatter; @@ -25,8 +37,14 @@ use crate::plugins::telemetry::tracing::reload::ReloadTracer; pub(crate) type LayeredRegistry = Layered; -type LayeredTracer = - Layered>, LayeredRegistry>; +pub(super) type LayeredTracer = Layered< + Filtered< + OpenTelemetryLayer>, + SamplingFilter, + LayeredRegistry, + >, + LayeredRegistry, +>; // These handles allow hot tracing of layers. They have complex type definitions because tracing has // generic types in the layer definition. @@ -52,11 +70,15 @@ static FMT_LAYER_HANDLE: OnceCell< Handle + Send + Sync>, LayeredTracer>, > = OnceCell::new(); +pub(super) static SPAN_SAMPLING_RATE: AtomicU64 = AtomicU64::new(0); + pub(crate) fn init_telemetry(log_level: &str) -> Result<()> { let hot_tracer = ReloadTracer::new( opentelemetry::sdk::trace::TracerProvider::default().versioned_tracer("noop", None, None), ); - let opentelemetry_layer = tracing_opentelemetry::layer().with_tracer(hot_tracer.clone()); + let opentelemetry_layer = tracing_opentelemetry::layer() + .with_tracer(hot_tracer.clone()) + .with_filter(SamplingFilter::new()); // We choose json or plain based on tty let fmt = if atty::is(atty::Stream::Stdout) { @@ -130,20 +152,110 @@ pub(super) fn reload_metrics(layer: MetricsLayer) { } } -#[allow(clippy::type_complexity)] -pub(super) fn reload_fmt( - layer: Box< - dyn Layer< - Layered>, LayeredRegistry>, - > + Send - + Sync, - >, -) { +pub(super) fn reload_fmt(layer: Box + Send + Sync>) { if let Some(handle) = FMT_LAYER_HANDLE.get() { handle.reload(layer).expect("fmt layer reload must succeed"); } } +pub(crate) struct SamplingFilter {} + +#[allow(dead_code)] +impl SamplingFilter { + pub(crate) fn new() -> Self { + Self {} + } + + pub(super) fn configure(sampler: &SamplerOption) { + let ratio = match sampler { + SamplerOption::TraceIdRatioBased(ratio) => { + // can't use std::cmp::min because f64 is not Ord + if *ratio > 1.0 { + 1.0 + } else { + *ratio + } + } + SamplerOption::Always(s) => match s { + super::config::Sampler::AlwaysOn => 1f64, + super::config::Sampler::AlwaysOff => 0f64, + }, + }; + + SPAN_SAMPLING_RATE.store(f64::to_bits(ratio), Ordering::Relaxed); + } + + fn sample(&self) -> bool { + let s: f64 = thread_rng().gen_range(0.0..=1.0); + s <= f64::from_bits(SPAN_SAMPLING_RATE.load(Ordering::Relaxed)) + } +} + +impl Filter for SamplingFilter +where + S: Subscriber + for<'span> LookupSpan<'span>, +{ + fn enabled( + &self, + meta: &tracing::Metadata<'_>, + cx: &tracing_subscriber::layer::Context<'_, S>, + ) -> bool { + // we ignore events + if !meta.is_span() { + return false; + } + + // if there's an exsting otel context set by the client request, and it is sampled, + 
// then that trace is sampled + let current_otel_context = opentelemetry_api::Context::current(); + if current_otel_context.span().span_context().is_sampled() { + return true; + } + + let current_span = cx.current_span(); + if let Some(spanref) = current_span + // the current span, which is the parent of the span that might get enabled here, + // exists, but it might have been enabled by another layer like metrics + .id() + .and_then(|id| cx.span(id)) + { + // if this extension is set, that means the parent span was accepted, and so the + // entire trace is accepted + let extensions = spanref.extensions(); + return extensions.get::().is_some(); + } + + // we only make the sampling decision on the root span. If we reach here for any other span, + // it means that the parent span was not enabled, so we should not enable this span either + if meta.name() != REQUEST_SPAN_NAME { + return false; + } + + // - there's no parent span (it's the root), so we make the sampling decision + self.sample() + } + + fn on_new_span( + &self, + _attrs: &tracing_core::span::Attributes<'_>, + id: &tracing_core::span::Id, + ctx: tracing_subscriber::layer::Context<'_, S>, + ) { + let span = ctx.span(id).expect("Span not found, this is a bug"); + let mut extensions = span.extensions_mut(); + if extensions.get_mut::().is_none() { + extensions.insert(SampledSpan); + } + } + + fn on_close(&self, id: tracing_core::span::Id, ctx: tracing_subscriber::layer::Context<'_, S>) { + let span = ctx.span(&id).expect("Span not found, this is a bug"); + let mut extensions = span.extensions_mut(); + extensions.remove::(); + } +} + +struct SampledSpan; /// prevents span fields from being formatted to a string when writing logs pub(crate) struct NullFieldFormatter; diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index 6236d106d2..40f14b5155 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -573,9 +573,11 @@ pub(crate) async fn create_plugins( fn inject_schema_id(schema: &Schema, configuration: &mut Value) { if configuration.get("apollo").is_none() { + /*FIXME: do we really need to set a default configuration for telemetry.apollo ? 
if let Some(telemetry) = configuration.as_object_mut() { telemetry.insert("apollo".to_string(), Value::Object(Default::default())); - } + }*/ + return; } if let (Some(schema_id), Some(apollo)) = ( &schema.api_schema().schema_id, @@ -730,7 +732,7 @@ mod test { fn test_inject_schema_id() { let schema = include_str!("testdata/starstuff@current.graphql"); let schema = Schema::parse_test(schema, &Default::default()).unwrap(); - let mut config = json!({}); + let mut config = json!({ "apollo": {} }); inject_schema_id(&schema, &mut config); let config = serde_json::from_value::(config).unwrap(); diff --git a/apollo-router/tests/fixtures/jaeger-0.5-sample.router.yaml b/apollo-router/tests/fixtures/jaeger-0.5-sample.router.yaml new file mode 100644 index 0000000000..a5a11b2829 --- /dev/null +++ b/apollo-router/tests/fixtures/jaeger-0.5-sample.router.yaml @@ -0,0 +1,29 @@ +telemetry: + tracing: + experimental_response_trace_id: + enabled: true + header_name: apollo-custom-trace-id + propagation: + jaeger: true + trace_config: + service_name: router + sampler: 0.5 + jaeger: + batch_processor: + scheduled_delay: 100ms + agent: + endpoint: default + experimental_logging: + when_header: + - name: apollo-router-log-request + value: test + headers: true # default: false + body: true # default: false + # log request for all requests coming from Iphones + - name: custom-header + match: ^foo.* + headers: true +override_subgraph_url: + products: http://localhost:4005 +include_subgraph_errors: + all: true diff --git a/apollo-router/tests/jaeger_test.rs b/apollo-router/tests/jaeger_test.rs index f1aef966dc..eb1a10f026 100644 --- a/apollo-router/tests/jaeger_test.rs +++ b/apollo-router/tests/jaeger_test.rs @@ -122,6 +122,34 @@ async fn test_local_root_no_sample() -> Result<(), BoxError> { Ok(()) } +#[tokio::test(flavor = "multi_thread")] +async fn test_local_root_50_percent_sample() -> Result<(), BoxError> { + let mut router = IntegrationTest::builder() + .telemetry(Telemetry::Jaeger) + .config(include_str!("fixtures/jaeger-0.5-sample.router.yaml")) + .build() + .await; + + router.start().await; + router.assert_started().await; + let query = json!({"query":"query ExampleQuery {topProducts{name}}\n","variables":{}, "operationName": "ExampleQuery"}); + + for _ in 0..100 { + let (id, result) = router.execute_untraced_query(&query).await; + + if result.headers().get("apollo-custom-trace-id").is_some() + && validate_trace(id, &query, Some("ExampleQuery"), &["router", "products"]) + .await + .is_ok() + { + router.graceful_shutdown().await; + + return Ok(()); + } + } + panic!("tried 100 requests with telemetry sampled at 50%, no traces were found") +} + #[tokio::test(flavor = "multi_thread")] #[ignore] async fn test_no_telemetry() -> Result<(), BoxError> { From ad07e8e25e0f88bcca5023222c38e6ad7e0c685a Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Tue, 5 Sep 2023 14:33:36 +0200 Subject: [PATCH 23/50] fix(subscription): add x-accel-buffering header for multipart response (#3749) Set `x-accel-buffering` to `no` when it's a multipart response because proxies need this configuration. 
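A minimal sketch with the `http` crate of what this amounts to (the helper function is illustrative, not part of the change):

```rust
use http::{HeaderName, HeaderValue, Response};

// Tell buffering proxies (e.g. nginx with proxy_buffering on) not to hold back
// the streamed multipart chunks.
fn disable_proxy_buffering<B>(response: &mut Response<B>) {
    response.headers_mut().insert(
        HeaderName::from_static("x-accel-buffering"),
        HeaderValue::from_static("no"),
    );
}
```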
Fixes #3683 --------- Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- .changesets/fix_bnjjj_fix_3683.md | 5 +++++ apollo-router/src/services/router_service.rs | 7 ++++++- apollo-router/tests/subscription_load_test.rs | 5 +++++ 3 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 .changesets/fix_bnjjj_fix_3683.md diff --git a/.changesets/fix_bnjjj_fix_3683.md b/.changesets/fix_bnjjj_fix_3683.md new file mode 100644 index 0000000000..ff130ff6f4 --- /dev/null +++ b/.changesets/fix_bnjjj_fix_3683.md @@ -0,0 +1,5 @@ +### fix(subscription): add x-accel-buffering header for multipart response ([Issue #3683](https://github.com/apollographql/router/issues/3683)) + +Set `x-accel-buffering` to `no` when it's a multipart response because proxies need this configuration. + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/3749 diff --git a/apollo-router/src/services/router_service.rs b/apollo-router/src/services/router_service.rs index 394f95fcbc..25c27a8b25 100644 --- a/apollo-router/src/services/router_service.rs +++ b/apollo-router/src/services/router_service.rs @@ -13,6 +13,7 @@ use futures::stream::StreamExt; use http::header::CONTENT_TYPE; use http::header::VARY; use http::HeaderMap; +use http::HeaderName; use http::HeaderValue; use http::Method; use http::StatusCode; @@ -306,6 +307,11 @@ impl RouterService { HeaderValue::from_static(MULTIPART_SUBSCRIPTION_CONTENT_TYPE), ); } + // Useful when you're using a proxy like nginx which enable proxy_buffering by default (http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering) + parts.headers.insert( + HeaderName::from_static("x-accel-buffering"), + HeaderValue::from_static("no"), + ); let multipart_stream = match response.subscribed { Some(true) => { StreamBody::new(Multipart::new(body, ProtocolMode::Subscription)) @@ -315,7 +321,6 @@ impl RouterService { ProtocolMode::Defer, )), }; - let response = (parts, multipart_stream).into_response().map(|body| { // Axum makes this `body` have type: // https://docs.rs/http-body/0.4.5/http_body/combinators/struct.UnsyncBoxBody.html diff --git a/apollo-router/tests/subscription_load_test.rs b/apollo-router/tests/subscription_load_test.rs index 7ffbef9da0..d8e8f3d3bc 100644 --- a/apollo-router/tests/subscription_load_test.rs +++ b/apollo-router/tests/subscription_load_test.rs @@ -1,5 +1,6 @@ //! This file is to load test subscriptions and should be launched manually, not in our CI use futures::StreamExt; +use http::HeaderValue; use serde_json::json; use tower::BoxError; @@ -22,6 +23,10 @@ async fn test_subscription_load() -> Result<(), BoxError> { for i in 0..1000000i64 { let (_, response) = router.run_subscription(UNFEDERATED_SUB_QUERY).await; assert!(response.status().is_success()); + assert_eq!( + response.headers().get("x-accel-buffering").unwrap(), + &HeaderValue::from_static("no") + ); tokio::spawn(async move { let mut stream = response.bytes_stream(); From 4780e2128068e056447485a5ec2073a640e63db1 Mon Sep 17 00:00:00 2001 From: Simon Sapin Date: Tue, 5 Sep 2023 14:36:31 +0200 Subject: [PATCH 24/50] Upgrade webpki and rustls-webpki crates (#3728) Brings fixes for: * https://rustsec.org/advisories/RUSTSEC-2023-0052 * https://rustsec.org/advisories/RUSTSEC-2023-0053 Fix #3645 Because Apollo Router does not accept client certificates, it could only be affected if a subgraph supplied a pathological TLS server certificate. 
--------- Co-authored-by: Geoffroy Couprie --- .changesets/maint_simon_pkiup.md | 11 +++++++++++ Cargo.lock | 4 ++-- deny.toml | 5 ++--- xtask/Cargo.lock | 8 ++++---- 4 files changed, 19 insertions(+), 9 deletions(-) create mode 100644 .changesets/maint_simon_pkiup.md diff --git a/.changesets/maint_simon_pkiup.md b/.changesets/maint_simon_pkiup.md new file mode 100644 index 0000000000..8e606d8631 --- /dev/null +++ b/.changesets/maint_simon_pkiup.md @@ -0,0 +1,11 @@ +### Upgrade webpki and rustls-webpki crates ([PR #3728](https://github.com/apollographql/router/pull/3728)) + +Brings fixes for: + +* https://rustsec.org/advisories/RUSTSEC-2023-0052 +* https://rustsec.org/advisories/RUSTSEC-2023-0053 + +Because Apollo Router does not accept client certificates, it could only be affected +if a subgraph supplied a pathological TLS server certificate. + +By [@SimonSapin](https://github.com/SimonSapin) in https://github.com/apollographql/router/pull/3728 diff --git a/Cargo.lock b/Cargo.lock index fb568ac1a0..cbcbc5699d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7200,9 +7200,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" dependencies = [ "ring", "untrusted", diff --git a/deny.toml b/deny.toml index 084bb9a883..45dc48653d 100644 --- a/deny.toml +++ b/deny.toml @@ -26,9 +26,8 @@ git-fetch-with-cli = true # A list of advisory IDs to ignore. Note that ignored advisories will still # output a note when they are encountered. -# RUSTSEC-2023-0052 and RUSTSEC-2023-0053 are pending a webpki update that is tracked by https://github.com/apollographql/router/issues/3645 -# and will be fixed by https://github.com/apollographql/router/pull/3643 -ignore = ["RUSTSEC-2023-0053", "RUSTSEC-2023-0052"] +# rustsec advisory exemptions +ignore = [] # This section is considered when running `cargo deny check licenses` # More documentation for the licenses section can be found here: diff --git a/xtask/Cargo.lock b/xtask/Cargo.lock index bdf077abae..a94d3cae59 100644 --- a/xtask/Cargo.lock +++ b/xtask/Cargo.lock @@ -1196,9 +1196,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.101.3" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "261e9e0888cba427c3316e6322805653c9425240b6fd96cee7cb671ab70ab8d0" +checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" dependencies = [ "ring", "untrusted", @@ -1732,9 +1732,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" dependencies = [ "ring", "untrusted", From d751af63e8e5cf3cde2dbbcdb632c4181461137f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 14:41:47 +0200 Subject: [PATCH 25/50] fix(deps): update rust crate router-bridge to v0.5.5+v2.5.4 (#3717) This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [router-bridge](https://www.apollographql.com/apollo-federation/) ([source](https://togithub.com/apollographql/federation)) | dependencies | patch | 
`=0.5.4+v2.5.3` -> `=0.5.5+v2.5.4` | --- Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cbcbc5699d..9032b007f3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5153,9 +5153,9 @@ dependencies = [ [[package]] name = "router-bridge" -version = "0.5.4+v2.5.3" +version = "0.5.5+v2.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82d3e1bfc37d92eab53edcd17d4290b5aa8fb95ab43d0408f5d9b56157a6d61c" +checksum = "a33cdf930b79165fd6a0d3b94ccd930162103255db0dd1a7dd6625568b347539" dependencies = [ "anyhow", "async-channel", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index ab458424d5..33921ff1c6 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -172,7 +172,7 @@ reqwest = { version = "0.11.19", default-features = false, features = [ "stream", ] } # note: this dependency should _always_ be pinned, prefix the version with an `=` -router-bridge = "=0.5.4+v2.5.3" +router-bridge = "=0.5.5+v2.5.4" rust-embed = "6.8.1" rustls = "0.21.6" rustls-pemfile = "1.0.3" From a08e48727dae04e38635033a7acdde000ee46ea5 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 5 Sep 2023 14:49:18 +0200 Subject: [PATCH 26/50] GraphOS authorization: add an example of scope manipulation with router service level rhai (#3719) Co-authored-by: Maria Elisabeth Schreiber Co-authored-by: Edward Huang <18322228+shorgi@users.noreply.github.com> --- .../docs_geal_authorization_router_rhai.md | 5 +++ docs/source/configuration/authorization.mdx | 33 +++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 .changesets/docs_geal_authorization_router_rhai.md diff --git a/.changesets/docs_geal_authorization_router_rhai.md b/.changesets/docs_geal_authorization_router_rhai.md new file mode 100644 index 0000000000..0e3596252b --- /dev/null +++ b/.changesets/docs_geal_authorization_router_rhai.md @@ -0,0 +1,5 @@ +### GraphOS authorization: add an example of scope manipulation with router service level rhai ([PR #3719](https://github.com/apollographql/router/pull/3719)) + +The router authorization directive `@requiresScopes` expects scopes to come from the `scope` claim in the OAuth2 access token format ( https://datatracker.ietf.org/doc/html/rfc6749#section-3.3 ). Some tokens may have scopes stored in a different way, like an array of strings, or even in different claims. This documents a way to extract the scopes and prepare them in the right format for consumption by `@requiresScopes`, ushing Rhai. + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3719 \ No newline at end of file diff --git a/docs/source/configuration/authorization.mdx b/docs/source/configuration/authorization.mdx index 1b6d1e47f8..1a21b54a1c 100644 --- a/docs/source/configuration/authorization.mdx +++ b/docs/source/configuration/authorization.mdx @@ -140,6 +140,39 @@ claims = context["apollo_authentication::JWT::claims"] claims["scope"] = "scope1 scope2 scope3" ``` + + +If the `apollo_authentication::JWT::claims` object holds scopes in another format, for example, an array of strings, or at a key other than `"scope"`, you can edit the claims with a [Rhai script](../customizations/rhai). + +The example below extracts an array of scopes from the `"roles"` claim and reformats them as a space-separated string. 
+ +```Rhai +fn router_service(service) { + let request_callback = |request| { + let claims = request.context["apollo_authentication::JWT::claims"]; + let roles = claims["roles"]; + + let scope = ""; + if roles.len() > 1 { + scope = roles[0]; + } + + if roles.len() > 2 { + for role in roles[1..] { + scope += ' '; + scope += role; + } + } + + claims["scope"] = scope; + request.context["apollo_authentication::JWT::claims"] = claims; + }; + service.map_request(request_callback); +} +``` + + + #### Usage To use the `@requiresScopes` directive in a subgraph, you can [import it from the `@link` directive](/federation/federated-types/federated-directives/#importing-directives) like so: From 75c9b00bcd7e1fae711acdd845fe9cff3d76be02 Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Tue, 5 Sep 2023 15:56:49 +0200 Subject: [PATCH 27/50] fix: handle correctly multipart stream if the original stream is empty (#3748) Fixes #3293 --------- Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> Co-authored-by: Jeremy Lempereur --- .../fix_bnjjj_fix_empty_multipart_stream.md | 5 ++ apollo-router/src/protocols/multipart.rs | 74 +++++++++++++++++-- 2 files changed, 72 insertions(+), 7 deletions(-) create mode 100644 .changesets/fix_bnjjj_fix_empty_multipart_stream.md diff --git a/.changesets/fix_bnjjj_fix_empty_multipart_stream.md b/.changesets/fix_bnjjj_fix_empty_multipart_stream.md new file mode 100644 index 0000000000..68aa0de28a --- /dev/null +++ b/.changesets/fix_bnjjj_fix_empty_multipart_stream.md @@ -0,0 +1,5 @@ +### Handle multipart stream if the original stream is empty ([Issue #3293](https://github.com/apollographql/router/issues/3293)) + +For subscription and defer, in case the multipart response stream is empty then it should end correctly. 
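The fix in `multipart.rs` below chains an explicit end-of-stream marker onto the response stream; a self-contained sketch of that pattern (illustrative types, not the router's own):

```rust
use futures::stream::{self, StreamExt};
use tokio_stream::once;

// Chaining a sentinel guarantees at least one terminating item,
// even when the underlying stream is empty.
#[tokio::main]
async fn main() {
    let responses: Vec<&str> = vec![]; // empty source stream
    let terminated = stream::iter(responses).map(Some).chain(once(None));
    let items: Vec<Option<&str>> = terminated.collect().await;
    assert_eq!(items, vec![None]); // the end marker is still emitted
}
```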
+ +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/3748 \ No newline at end of file diff --git a/apollo-router/src/protocols/multipart.rs b/apollo-router/src/protocols/multipart.rs index b617c5ff50..59cb5f3212 100644 --- a/apollo-router/src/protocols/multipart.rs +++ b/apollo-router/src/protocols/multipart.rs @@ -8,6 +8,7 @@ use futures::stream::StreamExt; use futures::Stream; use serde::Serialize; use serde_json_bytes::Value; +use tokio_stream::once; use tokio_stream::wrappers::IntervalStream; use crate::graphql; @@ -36,8 +37,14 @@ struct SubscriptionPayload { errors: Vec, } +enum MessageKind { + Heartbeat, + Message(graphql::Response), + Eof, +} + pub(crate) struct Multipart { - stream: Pin> + Send>>, + stream: Pin + Send>>, is_first_chunk: bool, is_terminated: bool, mode: ProtocolMode, @@ -50,11 +57,14 @@ impl Multipart { { let stream = match mode { ProtocolMode::Subscription => select( - stream.map(Some), - IntervalStream::new(tokio::time::interval(HEARTBEAT_INTERVAL)).map(|_| None), + stream + .map(MessageKind::Message) + .chain(once(MessageKind::Eof)), + IntervalStream::new(tokio::time::interval(HEARTBEAT_INTERVAL)) + .map(|_| MessageKind::Heartbeat), ) .boxed(), - ProtocolMode::Defer => stream.map(Some).boxed(), + ProtocolMode::Defer => stream.map(MessageKind::Message).boxed(), }; Self { @@ -78,7 +88,7 @@ impl Stream for Multipart { } match self.stream.as_mut().poll_next(cx) { Poll::Ready(message) => match message { - Some(None) => { + Some(MessageKind::Heartbeat) => { // It's the ticker for heartbeat for subscription let buf = if self.is_first_chunk { self.is_first_chunk = false; @@ -93,7 +103,7 @@ impl Stream for Multipart { Poll::Ready(Some(Ok(buf))) } - Some(Some(mut response)) => { + Some(MessageKind::Message(mut response)) => { let mut buf = if self.is_first_chunk { self.is_first_chunk = false; Vec::from(&b"\r\n--graphql\r\ncontent-type: application/json\r\n\r\n"[..]) @@ -132,7 +142,26 @@ impl Stream for Multipart { Poll::Ready(Some(Ok(buf.into()))) } - None => Poll::Ready(None), + Some(MessageKind::Eof) => { + // If the stream ends or is empty + let buf = if self.is_first_chunk { + self.is_first_chunk = false; + Bytes::from_static( + &b"\r\n--graphql\r\ncontent-type: application/json\r\n\r\n{}\r\n--graphql--\r\n"[..] 
+ ) + } else { + Bytes::from_static( + &b"content-type: application/json\r\n\r\n{}\r\n--graphql--\r\n"[..], + ) + }; + self.is_terminated = true; + + Poll::Ready(Some(Ok(buf))) + } + None => { + self.is_terminated = true; + Poll::Ready(None) + } }, Poll::Pending => Poll::Pending, } @@ -205,4 +234,35 @@ mod tests { } } } + + #[tokio::test] + async fn test_empty_stream() { + let responses = vec![]; + let gql_responses = stream::iter(responses); + + let mut protocol = Multipart::new(gql_responses, ProtocolMode::Subscription); + let heartbeat = String::from( + "\r\n--graphql\r\ncontent-type: application/json\r\n\r\n{}\r\n--graphql\r\n", + ); + let mut curr_index = 0; + while let Some(resp) = protocol.next().await { + let res = dbg!(String::from_utf8(resp.unwrap().to_vec()).unwrap()); + if res == heartbeat { + continue; + } else { + match curr_index { + 0 => { + assert_eq!( + res, + "\r\n--graphql\r\ncontent-type: application/json\r\n\r\n{}\r\n--graphql--\r\n" + ); + } + _ => { + panic!("should not happen, test failed"); + } + } + curr_index += 1; + } + } + } } From d32eeb682cf7a13d5118956875ccaf5b5b1e8ad8 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 5 Sep 2023 15:57:13 +0200 Subject: [PATCH 28/50] fix authenticated directive reporting (#3753) This is not a security issue, it only affects usage reporting for the `@authenticated` directive --- .changesets/fix_geal_fix_authenticated_reporting.md | 5 +++++ apollo-router/src/plugins/authorization/mod.rs | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 .changesets/fix_geal_fix_authenticated_reporting.md diff --git a/.changesets/fix_geal_fix_authenticated_reporting.md b/.changesets/fix_geal_fix_authenticated_reporting.md new file mode 100644 index 0000000000..ff189e1d7f --- /dev/null +++ b/.changesets/fix_geal_fix_authenticated_reporting.md @@ -0,0 +1,5 @@ +### Fix authenticated directive reporting ([PR #3753](https://github.com/apollographql/router/pull/3753)) + +The context key for the `@authenticated` directive only affects usage reporting + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3753 \ No newline at end of file diff --git a/apollo-router/src/plugins/authorization/mod.rs b/apollo-router/src/plugins/authorization/mod.rs index 20bfe91db4..babcc91c1b 100644 --- a/apollo-router/src/plugins/authorization/mod.rs +++ b/apollo-router/src/plugins/authorization/mod.rs @@ -141,7 +141,7 @@ impl AuthorizationPlugin { // if this fails, the query is invalid and will fail at the query planning phase. 
// We do not return validation errors here for now because that would imply a huge // refactoring of telemetry and tests - if traverse::document(&mut visitor, file_id).is_ok() && !visitor.found { + if traverse::document(&mut visitor, file_id).is_ok() && visitor.found { context.insert(AUTHENTICATED_KEY, true).unwrap(); } From 07e69593ee321f1a2068074ad8ecf133e4af9db3 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 14:25:19 +0000 Subject: [PATCH 29/50] fix(deps): update rust crate walkdir to 2.4.0 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- xtask/Cargo.lock | 4 ++-- xtask/Cargo.toml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9032b007f3..44bc38cd31 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7080,9 +7080,9 @@ checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" dependencies = [ "same-file", "winapi-util", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 33921ff1c6..7e8d6fed3e 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -297,7 +297,7 @@ tracing-subscriber = { version = "0.3", default-features = false, features = [ "fmt", ] } tracing-test = "0.2.4" -walkdir = "2.3.3" +walkdir = "2.4.0" wiremock = "0.5.19" [target.'cfg(target_os = "linux")'.dev-dependencies] diff --git a/xtask/Cargo.lock b/xtask/Cargo.lock index a94d3cae59..6bd8d563b2 100644 --- a/xtask/Cargo.lock +++ b/xtask/Cargo.lock @@ -1625,9 +1625,9 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "walkdir" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" dependencies = [ "same-file", "winapi-util", diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index b19d08afac..90412259fe 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -39,7 +39,7 @@ tempfile = "3" tinytemplate = "1.2.1" tokio = "1.32.0" which = "4" -walkdir = "2.3.3" +walkdir = "2.4.0" [target.'cfg(target_os = "macos")'.dependencies] base64 = "0.21" From 83597647dadf9acfce5dbdd1c0ad300059304631 Mon Sep 17 00:00:00 2001 From: Nicolas Moutschen Date: Wed, 6 Sep 2023 10:56:16 +0200 Subject: [PATCH 30/50] feat(apollo-router): add support for GraphOS Cloud metrics (#3761) Fixes #3760 --- .changesets/maint_nicolas_otlp_filter.md | 5 +++++ apollo-router/src/plugins/telemetry/metrics/filter.rs | 9 ++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 .changesets/maint_nicolas_otlp_filter.md diff --git a/.changesets/maint_nicolas_otlp_filter.md b/.changesets/maint_nicolas_otlp_filter.md new file mode 100644 index 0000000000..7bb2f7070c --- /dev/null +++ b/.changesets/maint_nicolas_otlp_filter.md @@ -0,0 +1,5 @@ +### Add support GraphOS Cloud metrics ([Issue #3760](https://github.com/apollographql/router/issues/3760)) + +Add support for GraphOS Cloud metrics in the Apollo OTLP Exporter. 
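The change below widens the exporter's allow-list regex; a quick sketch of what it now accepts, using the metric names exercised by the test in this diff:

```rust
use regex::Regex;

// The updated allow-list matches apollo.router.operations*/config* as before,
// plus the new apollo.graphos.cloud* metric names.
fn main() {
    let allow =
        Regex::new(r"apollo\.(graphos\.cloud|router\.(operations?|config))(\..*|$)").unwrap();
    assert!(allow.is_match("apollo.graphos.cloud.test"));
    assert!(allow.is_match("apollo.router.operations"));
    assert!(!allow.is_match("apollo.router.unknown.test"));
}
```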
+ +By [@nmoutschen](https://github.com/nmoutschen) in https://github.com/apollographql/router/pull/3761 \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/metrics/filter.rs b/apollo-router/src/plugins/telemetry/metrics/filter.rs index 549da8eeed..919a073cd7 100644 --- a/apollo-router/src/plugins/telemetry/metrics/filter.rs +++ b/apollo-router/src/plugins/telemetry/metrics/filter.rs @@ -37,7 +37,7 @@ impl FilterMeterProvider { FilterMeterProvider::builder() .delegate(delegate) .allow( - Regex::new(r"apollo\.router\.(operations?|config)(\..*|$)") + Regex::new(r"apollo\.(graphos\.cloud|router\.(operations?|config))(\..*|$)") .expect("regex should have been valid"), ) .build() @@ -212,6 +212,7 @@ mod test { .versioned_meter("filtered", None, None); filtered.u64_counter("apollo.router.operations").init(); filtered.u64_counter("apollo.router.operations.test").init(); + filtered.u64_counter("apollo.graphos.cloud.test").init(); filtered.u64_counter("apollo.router.unknown.test").init(); assert!(delegate .instrument_provider @@ -225,6 +226,12 @@ mod test { .lock() .unwrap() .contains(&("apollo.router.operations".to_string(), None, None))); + assert!(delegate + .instrument_provider + .counters_created + .lock() + .unwrap() + .contains(&("apollo.graphos.cloud.test".to_string(), None, None))); assert!(!delegate .instrument_provider .counters_created From 2e5d90f9d1d5b095926b6d20e9b9cc0d444f003d Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Wed, 6 Sep 2023 11:36:49 +0100 Subject: [PATCH 31/50] Fix metrics attribute types (#3724) Metrics attributes were being coerced to strings. This is now fixed. In addition, the logic around types accepted as metrics attributes has been simplified. It will log and ignore values of the wrong type. Fixes: #3687 **Checklist** Complete the checklist (and note appropriate exceptions) before a final PR is raised. - [ ] Changes are compatible[^1] - [ ] Documentation[^2] completed - [ ] Performance impact assessed and acceptable - Tests added and passing[^3] - [ ] Unit Tests - [ ] Integration Tests - [ ] Manual Tests **Exceptions** *Note any exceptions here* **Notes** [^1]. It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this. [^2]. Configuration is an important part of many changes. Where applicable please try to document configuration examples. [^3]. Tick whichever testing boxes are applicable. If you are adding Manual Tests: - please document the manual testing (extensively) in the Exceptions. 
- please raise a separate issue to automate the test and label it (or ask for it to be labeled) as `manual test` --------- Co-authored-by: bryn --- .changesets/fix_bryn_fix_metrics_typing.md | 6 + .../axum_factory/axum_http_server_factory.rs | 6 +- apollo-router/src/axum_factory/listeners.rs | 4 +- apollo-router/src/configuration/metrics.rs | 54 +++- ...etrics__test__metrics@apq.router.yaml.snap | 6 +- ...st__metrics@authorization.router.yaml.snap | 4 +- ...@authorization_directives.router.yaml.snap | 4 +- ...test__metrics@coprocessor.router.yaml.snap | 12 +- ...s__test__metrics@entities.router.yaml.snap | 18 +- ...ics__test__metrics@limits.router.yaml.snap | 16 +- ...metrics@persisted_queries.router.yaml.snap | 6 +- ...st__metrics@subscriptions.router.yaml.snap | 10 +- ...__test__metrics@telemetry.router.yaml.snap | 12 +- ...__metrics@traffic_shaping.router.yaml.snap | 16 +- .../src/plugins/telemetry/metrics/layer.rs | 251 +++++++++++++++--- apollo-router/src/plugins/telemetry/mod.rs | 4 +- .../plugins/traffic_shaping/timeout/future.rs | 2 +- .../src/query_planner/bridge_query_planner.rs | 12 +- apollo-router/src/uplink/mod.rs | 4 +- 19 files changed, 339 insertions(+), 108 deletions(-) create mode 100644 .changesets/fix_bryn_fix_metrics_typing.md diff --git a/.changesets/fix_bryn_fix_metrics_typing.md b/.changesets/fix_bryn_fix_metrics_typing.md new file mode 100644 index 0000000000..ab4d6eef03 --- /dev/null +++ b/.changesets/fix_bryn_fix_metrics_typing.md @@ -0,0 +1,6 @@ +### Fix metrics attribute types ([Issue #3687](https://github.com/apollographql/router/issues/3687)) + +Metrics attributes were being coerced to strings. This is now fixed. +In addition, the logic around types accepted as metrics attributes has been simplified. It will log and ignore values of the wrong type. 
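Concretely, attribute values passed alongside a metric in a `tracing` event now keep their Rust types; a small sketch using names taken from the config-metric snapshots in this diff (the wrapper function itself is illustrative):

```rust
// With this change a boolean attribute stays a bool instead of being coerced
// to the string "true"/"false"; assumes the router's metrics layer is installed.
fn emit_apq_config_metric(redis_cache_configured: bool) {
    tracing::info!(
        value.apollo.router.config.apq = 1u64,               // the gauge value
        opt__router__cache__redis__ = redis_cache_configured // typed bool attribute
    );
}
```

In debug builds a type mismatch also panics via `log_and_panic_in_debug_build!`, as shown in `layer.rs` further down.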
+ +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/3724 diff --git a/apollo-router/src/axum_factory/axum_http_server_factory.rs b/apollo-router/src/axum_factory/axum_http_server_factory.rs index 1ef5fc452f..0b414f25b1 100644 --- a/apollo-router/src/axum_factory/axum_http_server_factory.rs +++ b/apollo-router/src/axum_factory/axum_http_server_factory.rs @@ -500,7 +500,7 @@ async fn handle_graphql( service: router::BoxService, http_request: Request, ) -> impl IntoResponse { - tracing::info!(counter.apollo_router_session_count_active = 1,); + tracing::info!(counter.apollo_router_session_count_active = 1i64,); let request: router::Request = http_request.into(); let context = request.context.clone(); @@ -518,7 +518,7 @@ async fn handle_graphql( match res { Err(e) => { - tracing::info!(counter.apollo_router_session_count_active = -1,); + tracing::info!(counter.apollo_router_session_count_active = -1i64,); if let Some(source_err) = e.source() { if source_err.is::() { return RateLimited::new().into_response(); @@ -541,7 +541,7 @@ async fn handle_graphql( .into_response() } Ok(response) => { - tracing::info!(counter.apollo_router_session_count_active = -1,); + tracing::info!(counter.apollo_router_session_count_active = -1i64,); let (mut parts, body) = response.response.into_parts(); let opt_compressor = accept_encoding diff --git a/apollo-router/src/axum_factory/listeners.rs b/apollo-router/src/axum_factory/listeners.rs index 24160afc22..9237fd68db 100644 --- a/apollo-router/src/axum_factory/listeners.rs +++ b/apollo-router/src/axum_factory/listeners.rs @@ -218,7 +218,7 @@ pub(super) fn serve_router_on_listen_addr( } tracing::info!( - counter.apollo_router_session_count_total = 1, + counter.apollo_router_session_count_total = 1i64, listener = &address ); @@ -312,7 +312,7 @@ pub(super) fn serve_router_on_listen_addr( } tracing::info!( - counter.apollo_router_session_count_total = -1, + counter.apollo_router_session_count_total = -1i64, listener = &address ); diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs index 0e8d5e74fb..eb459b92e9 100644 --- a/apollo-router/src/configuration/metrics.rs +++ b/apollo-router/src/configuration/metrics.rs @@ -5,6 +5,7 @@ use std::time::Duration; use jsonpath_rust::JsonPathInst; use paste::paste; +use serde::Serialize; use serde_json::Value; use tokio::sync::OwnedSemaphorePermit; @@ -16,7 +17,39 @@ pub(crate) struct MetricsHandle { pub(crate) struct Metrics { yaml: Value, - metrics: HashMap)>, + metrics: HashMap)>, +} + +enum AttributeValue { + Bool(bool), + U64(u64), + I64(i64), + F64(f64), + String(String), +} + +impl Serialize for AttributeValue { + fn serialize(&self, serializer: S) -> Result { + match self { + AttributeValue::Bool(value) => serializer.serialize_bool(*value), + AttributeValue::U64(value) => serializer.serialize_u64(*value), + AttributeValue::I64(value) => serializer.serialize_i64(*value), + AttributeValue::F64(value) => serializer.serialize_f64(*value), + AttributeValue::String(value) => serializer.serialize_str(value), + } + } +} + +impl AttributeValue { + fn dyn_value(self: &AttributeValue) -> &dyn tracing::Value { + match self { + AttributeValue::Bool(value) => value as &dyn tracing::Value, + AttributeValue::U64(value) => value as &dyn tracing::Value, + AttributeValue::I64(value) => value as &dyn tracing::Value, + AttributeValue::F64(value) => value as &dyn tracing::Value, + AttributeValue::String(value) => value as &dyn tracing::Value, + } + } } 
impl Metrics { @@ -98,12 +131,19 @@ impl Metrics { let attr_name = stringify!([<$($attr __ )+>]).to_string(); match JsonPathInst::from_str($attr_path).expect("json path must be valid").find_slice(value).into_iter().next().as_deref() { // If the value is an object we can only state that it is set, but not what it is set to. - Some(Value::Object(_value)) => {attributes.insert(attr_name, "true".to_string());}, - Some(Value::Array(value)) if !value.is_empty() => {attributes.insert(attr_name, "true".to_string());}, + Some(Value::Object(_value)) => {attributes.insert(attr_name, AttributeValue::Bool(true));}, + Some(Value::Array(value)) if !value.is_empty() => {attributes.insert(attr_name, AttributeValue::Bool(true));}, // Scalars can be logged as is. - Some(value) => {attributes.insert(attr_name, value.to_string());}, + Some(Value::Number(value)) if value.is_f64() => {attributes.insert(attr_name, AttributeValue::F64(value.as_f64().expect("checked, qed")));}, + Some(Value::Number(value)) if value.is_i64() => {attributes.insert(attr_name, AttributeValue::I64(value.as_i64().expect("checked, qed")));}, + Some(Value::Number(value)) => {attributes.insert(attr_name, AttributeValue::U64(value.as_u64().expect("checked, qed")));}, + Some(Value::String(value)) => {attributes.insert(attr_name, AttributeValue::String(value.clone()));}, + Some(Value::Bool(value)) => {attributes.insert(attr_name, AttributeValue::Bool(*value));}, + // If the value is not set we don't specify the attribute. - None => {attributes.insert(attr_name, "false".to_string());}, + None => {attributes.insert(attr_name, AttributeValue::Bool(false));}, + + _ => {}, };)+ (1, attributes) } @@ -113,7 +153,7 @@ impl Metrics { let mut attributes = HashMap::new(); $( let attr_name = stringify!([<$($attr __ )+>]).to_string(); - attributes.insert(attr_name, "false".to_string()); + attributes.insert(attr_name, AttributeValue::Bool(false)); )+ (0, attributes) } @@ -122,7 +162,7 @@ impl Metrics { // Now log the metric paste!{ - tracing::info!($($metric).+ = metric.0, $($($attr).+ = metric.1.get(stringify!([<$($attr __ )+>])).expect("attribute must be in map")),+); + tracing::info!($($metric).+ = metric.0, $($($attr).+ = metric.1.get(stringify!([<$($attr __ )+>])).expect("attribute must be in map").dyn_value()),+); } }; } diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap index bf5efaf603..9108dfc7a1 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap @@ -4,7 +4,7 @@ expression: "&metrics.metrics" --- value.apollo.router.config.apq: - 1 - - opt__router__cache__in_memory__: "true" - opt__router__cache__redis__: "true" - opt__subgraph__: "true" + - opt__router__cache__in_memory__: true + opt__router__cache__redis__: true + opt__subgraph__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap index 11f9160614..e45a4962f7 100644 --- 
a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap @@ -4,6 +4,6 @@ expression: "&metrics.metrics" --- value.apollo.router.config.authorization: - 1 - - opt__directives__: "false" - opt__require_authentication__: "true" + - opt__directives__: false + opt__require_authentication__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap index 61b5d4c144..38462ec606 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap @@ -4,6 +4,6 @@ expression: "&metrics.metrics" --- value.apollo.router.config.authorization: - 1 - - opt__directives__: "true" - opt__require_authentication__: "false" + - opt__directives__: true + opt__require_authentication__: false diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap index b5eb1df764..bdc1a7899b 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap @@ -4,10 +4,10 @@ expression: "&metrics.metrics" --- value.apollo.router.config.coprocessor: - 1 - - opt__router__request__: "true" - opt__router__response__: "true" - opt__subgraph__request__: "true" - opt__subgraph__response__: "true" - opt__supergraph__request__: "false" - opt__supergraph__response__: "false" + - opt__router__request__: true + opt__router__response__: true + opt__subgraph__request__: true + opt__subgraph__response__: true + opt__supergraph__request__: false + opt__supergraph__response__: false diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap index 1bce92d5c8..e4fe10d957 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap @@ -4,15 +4,15 @@ expression: "&metrics.metrics" --- value.apollo.router.config.entities: - 1 - - opt__cache__: "true" + - opt__cache__: true value.apollo.router.config.traffic_shaping: - 1 - - opt__router__rate_limit__: "false" - opt__router__timout__: "false" - opt__subgraph__compression__: "false" - opt__subgraph__deduplicate_query__: "false" - opt__subgraph__http2__: "false" - opt__subgraph__rate_limit__: "false" - opt__subgraph__retry__: "false" - opt__subgraph__timeout__: "false" + - opt__router__rate_limit__: false + opt__router__timout__: false + 
opt__subgraph__compression__: false + opt__subgraph__deduplicate_query__: false + opt__subgraph__http2__: false + opt__subgraph__rate_limit__: false + opt__subgraph__retry__: false + opt__subgraph__timeout__: false diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap index 53807bab66..055f60152d 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap @@ -4,12 +4,12 @@ expression: "&metrics.metrics" --- value.apollo.router.config.limits: - 1 - - opt__operation__max_aliases__: "true" - opt__operation__max_depth__: "true" - opt__operation__max_height__: "true" - opt__operation__max_root_fields__: "true" - opt__operation__warn_only__: "true" - opt__parser__max_recursion__: "true" - opt__parser__max_tokens__: "true" - opt__request__max_size__: "true" + - opt__operation__max_aliases__: true + opt__operation__max_depth__: true + opt__operation__max_height__: true + opt__operation__max_root_fields__: true + opt__operation__warn_only__: true + opt__parser__max_recursion__: true + opt__parser__max_tokens__: true + opt__request__max_size__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap index 507f9c756f..72b803ca49 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap @@ -4,7 +4,7 @@ expression: "&metrics.metrics" --- value.apollo.router.config.persisted_queries: - 1 - - opt__log_unknown__: "true" - opt__safelist__enabled__: "true" - opt__safelist__require_id__: "true" + - opt__log_unknown__: true + opt__safelist__enabled__: true + opt__safelist__require_id__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap index 3709a1603d..a019d34928 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap @@ -4,9 +4,9 @@ expression: "&metrics.metrics" --- value.apollo.router.config.subscriptions: - 1 - - opt__deduplication__: "false" - opt__max_opened__: "true" - opt__mode__callback__: "true" - opt__mode__passthrough__: "true" - opt__queue_capacity__: "true" + - opt__deduplication__: false + opt__max_opened__: true + opt__mode__callback__: true + opt__mode__passthrough__: true + opt__queue_capacity__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap 
b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap index 7e02cf7f31..8ea0c00cab 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap @@ -4,10 +4,10 @@ expression: "&metrics.metrics" --- value.apollo.router.config.telemetry: - 1 - - opt__metrics__otlp__: "true" - opt__metrics__prometheus__: "true" - opt__tracing__datadog__: "true" - opt__tracing__jaeger__: "true" - opt__tracing__otlp__: "true" - opt__tracing__zipkin__: "true" + - opt__metrics__otlp__: true + opt__metrics__prometheus__: true + opt__tracing__datadog__: true + opt__tracing__jaeger__: true + opt__tracing__otlp__: true + opt__tracing__zipkin__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap index 1cdb685e7d..ab53cd0460 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap @@ -4,12 +4,12 @@ expression: "&metrics.metrics" --- value.apollo.router.config.traffic_shaping: - 1 - - opt__router__rate_limit__: "true" - opt__router__timout__: "true" - opt__subgraph__compression__: "true" - opt__subgraph__deduplicate_query__: "true" - opt__subgraph__http2__: "true" - opt__subgraph__rate_limit__: "true" - opt__subgraph__retry__: "true" - opt__subgraph__timeout__: "true" + - opt__router__rate_limit__: true + opt__router__timout__: true + opt__subgraph__compression__: true + opt__subgraph__deduplicate_query__: true + opt__subgraph__http2__: true + opt__subgraph__rate_limit__: true + opt__subgraph__retry__: true + opt__subgraph__timeout__: true diff --git a/apollo-router/src/plugins/telemetry/metrics/layer.rs b/apollo-router/src/plugins/telemetry/metrics/layer.rs index c195891d7b..f80b5552f9 100644 --- a/apollo-router/src/plugins/telemetry/metrics/layer.rs +++ b/apollo-router/src/plugins/telemetry/metrics/layer.rs @@ -24,7 +24,13 @@ use super::METRIC_PREFIX_HISTOGRAM; use super::METRIC_PREFIX_MONOTONIC_COUNTER; use super::METRIC_PREFIX_VALUE; -const I64_MAX: u64 = i64::MAX as u64; +macro_rules! log_and_panic_in_debug_build { + ($($tokens:tt)+) => {{ + tracing::debug!($($tokens)+); + #[cfg(debug_assertions)] + panic!("metric type error, see DEBUG log for details. Release builds will not panic but will still emit a debug log message"); + }}; +} #[derive(Default)] pub(crate) struct Instruments { @@ -159,69 +165,247 @@ pub(crate) struct MetricVisitor<'a> { pub(crate) metric: Option<(&'static str, InstrumentType)>, pub(crate) custom_attributes: Vec, pub(crate) meter: &'a Meter, + attributes_ignored: bool, +} + +impl<'a> MetricVisitor<'a> { + fn set_metric(&mut self, name: &'static str, instrument_type: InstrumentType) { + self.metric = Some((name, instrument_type)); + if self.attributes_ignored { + log_and_panic_in_debug_build!( + metric_name = name, + "metric attributes must be declared after the metric value. 
Some attributes have been ignored" + ); + } + } } impl<'a> Visit for MetricVisitor<'a> { - fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { - // Do not display the log content - if field.name() != "message" { + fn record_f64(&mut self, field: &Field, value: f64) { + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + self.set_metric(metric_name, InstrumentType::CounterF64(value)); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + self.set_metric(metric_name, InstrumentType::UpDownCounterF64(value)); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + self.set_metric(metric_name, InstrumentType::HistogramF64(value)); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + log_and_panic_in_debug_build!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { self.custom_attributes.push(KeyValue::new( Key::from_static_str(field.name()), - Value::from(format!("{value:?}")), + Value::from(value), )); + } else { + self.attributes_ignored = true } } - fn record_str(&mut self, field: &Field, value: &str) { - self.custom_attributes.push(KeyValue::new( - Key::from_static_str(field.name()), - Value::from(value.to_string()), - )); + fn record_i64(&mut self, field: &Field, value: i64) { + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + log_and_panic_in_debug_build!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + self.set_metric(metric_name, InstrumentType::UpDownCounterI64(value)); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + self.set_metric(metric_name, InstrumentType::HistogramI64(value)); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + log_and_panic_in_debug_build!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { + self.custom_attributes.push(KeyValue::new( + Key::from_static_str(field.name()), + Value::from(value), + )); + } else { + self.attributes_ignored = true + } } fn record_u64(&mut self, field: &Field, value: u64) { if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - self.metric = Some((metric_name, InstrumentType::CounterU64(value))); + self.set_metric(metric_name, InstrumentType::CounterU64(value)); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - if value <= I64_MAX { - self.metric = Some((metric_name, InstrumentType::UpDownCounterI64(value as i64))); - } else { - eprintln!( - "[tracing-opentelemetry]: Received Counter metric, but \ - provided u64: {value} is greater than i64::MAX. Ignoring \ - this metric." - ); - } + log_and_panic_in_debug_build!( + metric_name, + "counter must be i64. 
This metric will be ignored" + ); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - self.metric = Some((metric_name, InstrumentType::HistogramU64(value))); + self.set_metric(metric_name, InstrumentType::HistogramU64(value)); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { - self.metric = Some((metric_name, InstrumentType::GaugeU64(value))); + self.set_metric(metric_name, InstrumentType::GaugeU64(value)); + } else if self.metric.is_some() { + log_and_panic_in_debug_build!( + name = field.name(), + "metric attribute must be i64, f64, string or bool. This attribute will be ignored" + ); } else { - self.record_debug(field, &value); + self.attributes_ignored = true } } - fn record_f64(&mut self, field: &Field, value: f64) { + fn record_i128(&mut self, field: &Field, _value: i128) { if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - self.metric = Some((metric_name, InstrumentType::CounterF64(value))); + log_and_panic_in_debug_build!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - self.metric = Some((metric_name, InstrumentType::UpDownCounterF64(value))); + log_and_panic_in_debug_build!( + metric_name, + "counter must be i64. This metric will be ignored" + ); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - self.metric = Some((metric_name, InstrumentType::HistogramF64(value))); + log_and_panic_in_debug_build!( + metric_name, + "histogram must be u64, i64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + log_and_panic_in_debug_build!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { + log_and_panic_in_debug_build!( + name = field.name(), + "metric attribute must be i64, f64, string or bool. This attribute will be ignored" + ); } else { - self.record_debug(field, &value); + self.attributes_ignored = true } } - fn record_i64(&mut self, field: &Field, value: i64) { + fn record_u128(&mut self, field: &Field, _value: u128) { if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - self.metric = Some((metric_name, InstrumentType::CounterU64(value as u64))); + log_and_panic_in_debug_build!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - self.metric = Some((metric_name, InstrumentType::UpDownCounterI64(value))); + log_and_panic_in_debug_build!( + metric_name, + "counter must be i64. This metric will be ignored" + ); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - self.metric = Some((metric_name, InstrumentType::HistogramI64(value))); + log_and_panic_in_debug_build!( + metric_name, + "histogram must be u64, i64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + log_and_panic_in_debug_build!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { + log_and_panic_in_debug_build!( + name = field.name(), + "metric attribute must be i64, f64, string or bool. 
This attribute will be ignored" + ); } else { - self.record_debug(field, &value); + self.attributes_ignored = true + } + } + + fn record_bool(&mut self, field: &Field, value: bool) { + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + log_and_panic_in_debug_build!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + log_and_panic_in_debug_build!( + metric_name, + "counter must be i64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + log_and_panic_in_debug_build!( + metric_name, + "histogram must be u64, i64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + log_and_panic_in_debug_build!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { + self.custom_attributes.push(KeyValue::new( + Key::from_static_str(field.name()), + Value::from(value), + )); + } else { + self.attributes_ignored = true + } + } + + fn record_str(&mut self, field: &Field, value: &str) { + if field.name() != "message" { + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + log_and_panic_in_debug_build!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + log_and_panic_in_debug_build!( + metric_name, + "counter must be i64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + log_and_panic_in_debug_build!( + metric_name, + "histogram must be u64, i64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + log_and_panic_in_debug_build!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { + self.custom_attributes.push(KeyValue::new( + Key::from_static_str(field.name()), + Value::from(value.to_string()), + )); + } else { + self.attributes_ignored = true + } + } + } + + fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { + if field.name() != "message" { + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + log_and_panic_in_debug_build!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + log_and_panic_in_debug_build!( + metric_name, + "counter must be i64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + log_and_panic_in_debug_build!( + metric_name, + "histogram must be u64, i64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + log_and_panic_in_debug_build!( + metric_name, + "gauge must be u64. 
This metric will be ignored" + ); + } else if self.metric.is_some() { + self.custom_attributes.push(KeyValue::new( + Key::from_static_str(field.name()), + Value::from(format!("{value:?}")), + )); + } else { + self.attributes_ignored = true + } } } } @@ -265,6 +449,7 @@ where meter: &self.meter, metric: None, custom_attributes: Vec::new(), + attributes_ignored: false, }; event.record(&mut metric_visitor); metric_visitor.finish(); diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index c96157e4e4..0f46df3750 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -853,7 +853,7 @@ impl Telemetry { } ::tracing::info!( monotonic_counter.apollo.router.operations = 1u64, - http.response.status_code = parts.status.as_u16(), + http.response.status_code = parts.status.as_u16() as i64, ); let response = http::Response::from_parts( parts, @@ -869,7 +869,7 @@ impl Telemetry { ::tracing::info!( monotonic_counter.apollo.router.operations = 1u64, - http.response.status_code = 500, + http.response.status_code = 500i64, ); Err(err) } diff --git a/apollo-router/src/plugins/traffic_shaping/timeout/future.rs b/apollo-router/src/plugins/traffic_shaping/timeout/future.rs index 924fe6b215..8a390b393e 100644 --- a/apollo-router/src/plugins/traffic_shaping/timeout/future.rs +++ b/apollo-router/src/plugins/traffic_shaping/timeout/future.rs @@ -49,7 +49,7 @@ where match Pin::new(&mut this.sleep).poll(cx) { Poll::Pending => Poll::Pending, Poll::Ready(_) => { - tracing::info!(monotonic_counter.apollo_router_timeout = 1,); + tracing::info!(monotonic_counter.apollo_router_timeout = 1u64,); Poll::Ready(Err(Elapsed::new().into())) } } diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index e4929420de..7805d127f9 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -91,7 +91,7 @@ impl BridgeQueryPlanner { if has_validation_errors && !schema.has_errors() { tracing::warn!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_SCHEMA, validation.result = VALIDATION_FALSE_NEGATIVE, "validation mismatch: JS query planner reported a schema validation error, but apollo-rs did not" @@ -106,7 +106,7 @@ impl BridgeQueryPlanner { if configuration.experimental_graphql_validation_mode == GraphQLValidationMode::Both { if schema.has_errors() { tracing::warn!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_SCHEMA, validation.result = VALIDATION_FALSE_POSITIVE, "validation mismatch: apollo-rs reported a schema validation error, but JS query planner did not" @@ -114,7 +114,7 @@ impl BridgeQueryPlanner { } else { // false_negative was an early return so we know it was correct here tracing::info!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_SCHEMA, validation.result = VALIDATION_MATCH ); @@ -286,7 +286,7 @@ impl BridgeQueryPlanner { match (is_validation_error, &selections.validation_error) { (false, Some(_)) => { tracing::warn!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_OPERATION, validation.result = 
VALIDATION_FALSE_POSITIVE, "validation mismatch: JS query planner did not report query validation error, but apollo-rs did" @@ -294,7 +294,7 @@ impl BridgeQueryPlanner { } (true, None) => { tracing::warn!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_OPERATION, validation.result = VALIDATION_FALSE_NEGATIVE, "validation mismatch: apollo-rs did not report query validation error, but JS query planner did" @@ -302,7 +302,7 @@ impl BridgeQueryPlanner { } // if JS and Rust implementations agree, we return the JS result for now. _ => tracing::info!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_OPERATION, validation.result = VALIDATION_MATCH, ), diff --git a/apollo-router/src/uplink/mod.rs b/apollo-router/src/uplink/mod.rs index b72ef91935..2fb38ef4d1 100644 --- a/apollo-router/src/uplink/mod.rs +++ b/apollo-router/src/uplink/mod.rs @@ -196,7 +196,7 @@ where match fetch::(&client, &query_body, &mut endpoints.iter()).await { Ok(response) => { tracing::info!( - counter.apollo_router_uplink_fetch_count_total = 1, + monotonic_counter.apollo_router_uplink_fetch_count_total = 1u64, status = "success", query ); @@ -245,7 +245,7 @@ where } Err(err) => { tracing::info!( - counter.apollo_router_uplink_fetch_count_total = 1, + monotonic_counter.apollo_router_uplink_fetch_count_total = 1u64, status = "failure", query ); From 6b8acdc28da5fa581dcfee2b441667be8b411a37 Mon Sep 17 00:00:00 2001 From: Jeremy Lempereur Date: Wed, 6 Sep 2023 13:09:34 +0200 Subject: [PATCH 32/50] Update rust toolchain to 1.72.0 (#3707) Update rust toolchain to 1.72.0 --- .changesets/maint_igni_rust_1_72_0.md | 5 +++++ Cargo.toml | 2 +- apollo-router-scaffold/templates/base/Dockerfile | 2 +- apollo-router/Cargo.toml | 2 +- apollo-router/README.md | 2 +- apollo-router/src/json_ext.rs | 2 +- apollo-router/src/plugins/authorization/mod.rs | 6 +++--- apollo-router/src/plugins/coprocessor_test.rs | 16 ++++++++-------- apollo-router/src/query_planner/execution.rs | 6 +++--- apollo-router/src/query_planner/labeler.rs | 5 +++-- apollo-router/src/query_planner/plan.rs | 2 +- apollo-router/src/services/execution_service.rs | 4 ++-- apollo-router/src/spec/operation_limits.rs | 5 ++--- apollo-router/src/spec/query/transform.rs | 15 +++++++++------ apollo-router/src/uplink/mod.rs | 2 +- docs/source/customizations/custom-binary.mdx | 2 +- rust-toolchain.toml | 4 ++-- 17 files changed, 45 insertions(+), 37 deletions(-) create mode 100644 .changesets/maint_igni_rust_1_72_0.md diff --git a/.changesets/maint_igni_rust_1_72_0.md b/.changesets/maint_igni_rust_1_72_0.md new file mode 100644 index 0000000000..9371f0dcfa --- /dev/null +++ b/.changesets/maint_igni_rust_1_72_0.md @@ -0,0 +1,5 @@ +### Update rust toolchain to 1.72.0 ([PR #3707](https://github.com/apollographql/router/pull/3707)) + +The router-bridge update now allows us to use the latest rust version. 
+ +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3707 diff --git a/Cargo.toml b/Cargo.toml index 9c44480765..4861dbc154 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,5 @@ [workspace] +resolver = "2" default-members = ["apollo-router"] members = [ "apollo-router", @@ -40,4 +41,3 @@ incremental = false [profile.release-dhat] inherits = "release" debug = 1 - diff --git a/apollo-router-scaffold/templates/base/Dockerfile b/apollo-router-scaffold/templates/base/Dockerfile index 3514b700dc..db76b6d0bb 100644 --- a/apollo-router-scaffold/templates/base/Dockerfile +++ b/apollo-router-scaffold/templates/base/Dockerfile @@ -1,6 +1,6 @@ # Use the rust build image from docker as our base # renovate-automation: rustc version -FROM rust:1.71.1 as build +FROM rust:1.72.0 as build # Set our working directory for the build WORKDIR /usr/src/router diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 7e8d6fed3e..4f91e8a2dc 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -8,7 +8,7 @@ description = "A configurable, high-performance routing runtime for Apollo Feder license = "Elastic-2.0" # renovate-automation: rustc version -rust-version = "1.71.1" +rust-version = "1.72.0" edition = "2021" build = "build/main.rs" diff --git a/apollo-router/README.md b/apollo-router/README.md index b2e5cec865..4934b8a099 100644 --- a/apollo-router/README.md +++ b/apollo-router/README.md @@ -27,4 +27,4 @@ Most Apollo Router features can be defined using our [YAML configuration](https: If you prefer to write customizations in Rust or need more advanced customizations, see our section on [native customizations](https://www.apollographql.com/docs/router/customizations/native) for information on how to use `apollo-router` as a Rust library. We also publish Rust-specific documentation on our [`apollo-router` crate docs](https://docs.rs/crate/apollo-router). -The minimum supported Rust version (MSRV) for this version of `apollo-router` is **1.71.1**. +The minimum supported Rust version (MSRV) for this version of `apollo-router` is **1.72.0**. 
diff --git a/apollo-router/src/json_ext.rs b/apollo-router/src/json_ext.rs index e05b010550..6955f15924 100644 --- a/apollo-router/src/json_ext.rs +++ b/apollo-router/src/json_ext.rs @@ -144,7 +144,7 @@ impl ValueExt for Value { a_value.deep_merge(b_value); } - a.extend(b.into_iter()); + a.extend(b); } (_, Value::Null) => {} (Value::Object(_), Value::Array(_)) => { diff --git a/apollo-router/src/plugins/authorization/mod.rs b/apollo-router/src/plugins/authorization/mod.rs index babcc91c1b..7bc42dd114 100644 --- a/apollo-router/src/plugins/authorization/mod.rs +++ b/apollo-router/src/plugins/authorization/mod.rs @@ -252,7 +252,7 @@ impl AuthorizationPlugin { let compiler = match filter_res { None => compiler, Some((query, paths)) => { - unauthorized_paths.extend(paths.into_iter()); + unauthorized_paths.extend(paths); if query.is_empty() { return Err(QueryPlannerError::Unauthorized(unauthorized_paths)); @@ -272,7 +272,7 @@ impl AuthorizationPlugin { let compiler = match filter_res { None => compiler, Some((query, paths)) => { - unauthorized_paths.extend(paths.into_iter()); + unauthorized_paths.extend(paths); if query.is_empty() { return Err(QueryPlannerError::Unauthorized(unauthorized_paths)); @@ -292,7 +292,7 @@ impl AuthorizationPlugin { let compiler = match filter_res { None => compiler, Some((query, paths)) => { - unauthorized_paths.extend(paths.into_iter()); + unauthorized_paths.extend(paths); if query.is_empty() { return Err(QueryPlannerError::Unauthorized(unauthorized_paths)); diff --git a/apollo-router/src/plugins/coprocessor_test.rs b/apollo-router/src/plugins/coprocessor_test.rs index 297391af5e..26ef229eb7 100644 --- a/apollo-router/src/plugins/coprocessor_test.rs +++ b/apollo-router/src/plugins/coprocessor_test.rs @@ -287,7 +287,7 @@ mod tests { Box::pin(async { Ok(hyper::Response::builder() .body(Body::from( - r##"{ + r#"{ "version": 1, "stage": "SubgraphRequest", "control": { @@ -299,7 +299,7 @@ mod tests { "body": "Errors need a message, this will fail to deserialize" }] } - }"##, + }"#, )) .unwrap()) }) @@ -386,7 +386,7 @@ mod tests { Box::pin(async { Ok(hyper::Response::builder() .body(Body::from( - r##"{ + r#"{ "version": 1, "stage": "SubgraphRequest", "control": "continue", @@ -429,7 +429,7 @@ mod tests { }, "serviceName": "service name shouldn't change", "uri": "http://thisurihaschanged" - }"##, + }"#, )) .unwrap()) }) @@ -478,7 +478,7 @@ mod tests { Box::pin(async { Ok(hyper::Response::builder() .body(Body::from( - r##"{ + r#"{ "version": 1, "stage": "SubgraphRequest", "control": { @@ -495,7 +495,7 @@ mod tests { "headers": { "aheader": ["a value"] } - }"##, + }"#, )) .unwrap()) }) @@ -556,7 +556,7 @@ mod tests { Box::pin(async { Ok(hyper::Response::builder() .body(Body::from( - r##"{ + r#"{ "version": 1, "stage": "SubgraphResponse", "headers": { @@ -598,7 +598,7 @@ mod tests { "this-is-a-test-context": 42 } } - }"##, + }"#, )) .unwrap()) }) diff --git a/apollo-router/src/query_planner/execution.rs b/apollo-router/src/query_planner/execution.rs index 0cb52f333f..35945844cc 100644 --- a/apollo-router/src/query_planner/execution.rs +++ b/apollo-router/src/query_planner/execution.rs @@ -439,7 +439,7 @@ impl DeferredNode { let (primary_value, primary_errors) = primary_receiver.recv().await.unwrap_or_default(); value.deep_merge(primary_value); - errors.extend(primary_errors.into_iter()) + errors.extend(primary_errors) } else { while let Some((v, _remaining)) = stream.next().await { // a Err(RecvError) means either that the fetch was not performed and the @@ -486,7 +486,7 @@ 
impl DeferredNode { let (primary_value, primary_errors) = primary_receiver.recv().await.unwrap_or_default(); v.deep_merge(primary_value); - errors.extend(primary_errors.into_iter()) + errors.extend(primary_errors) } if let Err(e) = tx @@ -511,7 +511,7 @@ impl DeferredNode { let (primary_value, primary_errors) = primary_receiver.recv().await.unwrap_or_default(); value.deep_merge(primary_value); - errors.extend(primary_errors.into_iter()); + errors.extend(primary_errors); if let Err(e) = tx .send( diff --git a/apollo-router/src/query_planner/labeler.rs b/apollo-router/src/query_planner/labeler.rs index cdf7cf4ea6..7bbb095eb7 100644 --- a/apollo-router/src/query_planner/labeler.rs +++ b/apollo-router/src/query_planner/labeler.rs @@ -65,8 +65,9 @@ impl<'a> Visitor for Labeler<'a> { ) -> Result, BoxError> { let parent_type = hir.type_condition().unwrap_or(parent_type); - let Some(selection_set) = selection_set(self, hir.selection_set(), parent_type)? - else { return Ok(None) }; + let Some(selection_set) = selection_set(self, hir.selection_set(), parent_type)? else { + return Ok(None); + }; let mut encoder_node = apollo_encoder::InlineFragment::new(selection_set); diff --git a/apollo-router/src/query_planner/plan.rs b/apollo-router/src/query_planner/plan.rs index b2f87ffe48..803ceaab3b 100644 --- a/apollo-router/src/query_planner/plan.rs +++ b/apollo-router/src/query_planner/plan.rs @@ -234,7 +234,7 @@ impl PlanNode { Self::Subscription { primary, rest } => match rest { Some(rest) => Box::new( rest.service_usage() - .chain(Some(primary.service_name.as_str()).into_iter()), + .chain(Some(primary.service_name.as_str())), ) as Box + 'a>, None => Box::new(Some(primary.service_name.as_str()).into_iter()), }, diff --git a/apollo-router/src/services/execution_service.rs b/apollo-router/src/services/execution_service.rs index 5e38fab3a6..3de41b2891 100644 --- a/apollo-router/src/services/execution_service.rs +++ b/apollo-router/src/services/execution_service.rs @@ -267,9 +267,9 @@ impl ExecutionService { schema.api_schema(), variables_set, ) - .into_iter(), + , ); - nullified_paths.extend(paths.into_iter()); + nullified_paths.extend(paths); }); match (response.path.as_ref(), response.data.as_ref()) { diff --git a/apollo-router/src/spec/operation_limits.rs b/apollo-router/src/spec/operation_limits.rs index e0ed96f15a..c6815382a9 100644 --- a/apollo-router/src/spec/operation_limits.rs +++ b/apollo-router/src/spec/operation_limits.rs @@ -81,12 +81,11 @@ pub(crate) fn check( debug_assert_eq!(ids.len(), 1); let query_id = ids[0]; - let Some(operation) = compiler.db.find_operation(query_id, operation_name.clone()) - else { + let Some(operation) = compiler.db.find_operation(query_id, operation_name.clone()) else { // Undefined or ambiguous operation name. // The request is invalid and will be rejected by some other part of the router, // if it wasn’t already before we got to this code path. - return Ok(()) + return Ok(()); }; let mut fragment_cache = HashMap::new(); diff --git a/apollo-router/src/spec/query/transform.rs b/apollo-router/src/spec/query/transform.rs index 3ece3b907a..76bfe0f9be 100644 --- a/apollo-router/src/spec/query/transform.rs +++ b/apollo-router/src/spec/query/transform.rs @@ -116,8 +116,9 @@ pub(crate) fn operation( .ok_or("ObjectTypeDefMissing")?; let type_name = object_type.name(); - let Some(selection_set) = selection_set(visitor, def.selection_set(), type_name)? - else { return Ok(None) }; + let Some(selection_set) = selection_set(visitor, def.selection_set(), type_name)? 
else { + return Ok(None); + }; let mut encoder_node = apollo_encoder::OperationDefinition::new(operation_type(def.operation_ty()), selection_set); @@ -151,8 +152,9 @@ pub(crate) fn fragment_definition( let name = hir.name(); let type_condition = hir.type_condition(); - let Some(selection_set) = selection_set(visitor, hir.selection_set(), type_condition)? - else { return Ok(None) }; + let Some(selection_set) = selection_set(visitor, hir.selection_set(), type_condition)? else { + return Ok(None); + }; let type_condition = apollo_encoder::TypeCondition::new(type_condition.into()); let mut encoder_node = @@ -237,8 +239,9 @@ pub(crate) fn inline_fragment( ) -> Result, BoxError> { let parent_type = hir.type_condition().unwrap_or(parent_type); - let Some(selection_set) = selection_set(visitor, hir.selection_set(), parent_type)? - else { return Ok(None) }; + let Some(selection_set) = selection_set(visitor, hir.selection_set(), parent_type)? else { + return Ok(None); + }; let mut encoder_node = apollo_encoder::InlineFragment::new(selection_set); diff --git a/apollo-router/src/uplink/mod.rs b/apollo-router/src/uplink/mod.rs index 2fb38ef4d1..1399a6b606 100644 --- a/apollo-router/src/uplink/mod.rs +++ b/apollo-router/src/uplink/mod.rs @@ -78,7 +78,7 @@ pub enum Endpoints { impl Default for Endpoints { fn default() -> Self { Self::fallback( - vec![GCP_URL, AWS_URL] + [GCP_URL, AWS_URL] .iter() .map(|url| Url::parse(url).expect("default urls must be valid")) .collect(), diff --git a/docs/source/customizations/custom-binary.mdx b/docs/source/customizations/custom-binary.mdx index 0934ac7cca..e5a7750124 100644 --- a/docs/source/customizations/custom-binary.mdx +++ b/docs/source/customizations/custom-binary.mdx @@ -20,7 +20,7 @@ import ElasticNotice from '../../shared/elastic-notice.mdx'; To compile the Apollo Router, you need to have the following installed: -* [Rust 1.71.1 or later](https://www.rust-lang.org/tools/install) +* [Rust 1.72.0 or later](https://www.rust-lang.org/tools/install) * [Node.js 16.9.1 or later](https://nodejs.org/en/download/) * [CMake 3.5.1 or later](https://cmake.org/download/) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index d502368c84..4bfc48e551 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] # renovate-automation: rustc version -channel = "1.71.1" -components = [ "rustfmt", "clippy" ] +channel = "1.72.0" +components = ["rustfmt", "clippy"] From 196902a9a9e82bf910088db504da879a4f6473fe Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Wed, 6 Sep 2023 14:20:57 +0200 Subject: [PATCH 33/50] Add experimental caching metrics (#3558) Fix #3554 This creates a new metric recorded only if we set the configuration option `telemetry.metrics.common.experimental_cache_metrics.enabled` to `true`. * `apollo.router.operations.entity` (histogram): cache hit ratio per subgraph and entity type This simulates an entity cache to find out if it would be useful. Each time we do a subgraph query, we use as cache key: - subgraph name - entity type - query - vary headers - entity key We record if we have seen this entity before (using a bloom filter) and calculate the cache hit ratio for that query, per subgraph and entity type. 
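Below is a minimal, standalone sketch of that estimation, for illustration only: the type and field names (`SimulatedCacheKey`, `HitRatioEstimator`) are made up for this example, it assumes the `bloomfilter` crate added as a dependency in this commit, and it omits the TTL-based rotation between two filters. The actual code added here is the `CacheCounter` type in `apollo-router/src/plugins/telemetry/mod.rs`.

```rust
use bloomfilter::Bloom;

// Hypothetical key covering everything the simulated entity cache would vary on:
// subgraph name, entity type, query hash, vary-headers hash and the hashed
// entity representation (key fields).
#[derive(Hash)]
struct SimulatedCacheKey {
    subgraph_name: String,
    entity_type: String,
    query_hash: String,
    vary_headers_hash: String,
    representation_hash: String,
}

struct HitRatioEstimator {
    // Small (~4kB) probabilistic set; false positives slightly overestimate hits.
    seen: Bloom<SimulatedCacheKey>,
}

impl HitRatioEstimator {
    fn new() -> Self {
        Self {
            seen: Bloom::new_for_fp_rate(10_000, 0.2),
        }
    }

    /// Records the entities referenced by one subgraph fetch and returns the
    /// simulated cache hit ratio for that fetch.
    fn record(&mut self, keys: &[SimulatedCacheKey]) -> f64 {
        if keys.is_empty() {
            return 0.0;
        }
        let mut hits = 0usize;
        for key in keys {
            // `check_and_set` returns true when the key was already present,
            // i.e. a simulated cache hit, and inserts it otherwise.
            if self.seen.check_and_set(key) {
                hits += 1;
            }
        }
        hits as f64 / keys.len() as f64
    }
}
```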
--------- Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> Co-authored-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- .changesets/maint_bnjjj_caching_metrics.md | 38 +++ Cargo.lock | 18 ++ apollo-router/Cargo.toml | 3 +- ...nfiguration__tests__schema_generation.snap | 17 ++ apollo-router/src/plugins/telemetry/config.rs | 23 ++ apollo-router/src/plugins/telemetry/mod.rs | 227 +++++++++++++++++- .../telemetry/tracing/apollo_telemetry.rs | 2 +- .../src/plugins/traffic_shaping/cache.rs | 91 +++++-- .../src/plugins/traffic_shaping/mod.rs | 5 +- 9 files changed, 393 insertions(+), 31 deletions(-) create mode 100644 .changesets/maint_bnjjj_caching_metrics.md diff --git a/.changesets/maint_bnjjj_caching_metrics.md b/.changesets/maint_bnjjj_caching_metrics.md new file mode 100644 index 0000000000..cd09700cfd --- /dev/null +++ b/.changesets/maint_bnjjj_caching_metrics.md @@ -0,0 +1,38 @@ +### Add experimental caching metrics ([PR #3532](https://github.com/apollographql/router/pull/3532)) + +It adds a metric only if you configure `telemetry.metrics.common.experimental_cache_metrics.enabled` to `true`. It will generate metrics to evaluate which entities would benefit from caching. It simulates a cache with a TTL, configurable at `telemetry.metrics.common.experimental_cache_metrics.ttl` (default: 5 seconds), and measures the cache hit rate per entity type and subgraph. + +example + +``` +# HELP apollo.router.operations.entity.cache_hit +# TYPE apollo_router_operations_entity.cache_hit histogram +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="0.05"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="0.1"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="0.25"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="0.5"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="1"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="2.5"} 3 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="5"} 4 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="10"} 4 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="20"} 4 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="1000"} 4 
+apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="+Inf"} 4 +apollo_router_operations_entity_cache_hitsum{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version=""} 7 +apollo_router_operations_entity_cache_hitcount{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version=""} 4 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="0.05"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="0.1"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="0.25"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="0.5"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="1"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="2.5"} 1 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="5"} 1 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="10"} 1 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="20"} 1 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="1000"} 1 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="+Inf"} 1 +apollo_router_operations_entity_cache_hitsum{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version=""} 1 +apollo_router_operations_entity_cache_hitcount{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version=""} 1 +``` + +By [@bnjjj](https://github.com/bnjjj) [@Geal](https://github.com/geal) in https://github.com/apollographql/router/pull/3532 \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 44bc38cd31..212d0f17d8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -281,6 +281,7 @@ dependencies = [ "aws-types", "axum", "base64 0.21.2", + "bloomfilter", "brotli", "buildstructor 0.5.3", "bytes", @@ -1098,6 +1099,17 @@ dependencies = [ "generic-array 0.14.7", ] +[[package]] +name = "bloomfilter" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b92db7965d438b8b4b1c1d0aedd188440a1084593c9eb7f6657e3df7e906d934" +dependencies = [ + "bit-vec", + 
"getrandom 0.2.10", + "siphasher", +] + [[package]] name = "brotli" version = "3.3.4" @@ -5793,6 +5805,12 @@ dependencies = [ "time", ] +[[package]] +name = "siphasher" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54ac45299ccbd390721be55b412d41931911f654fa99e2cb8bfb57184b2061fe" + [[package]] name = "slab" version = "0.4.8" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 4f91e8a2dc..b9abf26a6b 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -67,6 +67,7 @@ async-trait = "0.1.73" atty = "0.2.14" axum = { version = "0.6.20", features = ["headers", "json", "original-uri"] } base64 = "0.21.2" +bloomfilter = "1.0.12" buildstructor = "0.5.3" bytes = "1.4.0" clap = { version = "4.4.2", default-features = false, features = [ @@ -163,6 +164,7 @@ prost = "0.11.9" prost-types = "0.11.9" proteus = "0.5.0" rand = "0.8.5" +rand_core = "0.6.4" rhai = { version = "1.15.1", features = ["sync", "serde", "internals"] } regex = "1.9.5" reqwest = { version = "0.11.19", default-features = false, features = [ @@ -236,7 +238,6 @@ memchr = "2.6.3" brotli = "3.3.4" zstd = "0.12.4" zstd-safe = "6.0.6" -rand_core = "0.6.4" # note: AWS dependencies should always use the same version aws-sigv4 = "0.56.0" aws-credential-types = "0.56.0" diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index e54c9226cd..ff9123ab31 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -4314,6 +4314,23 @@ expression: "&schema" "format": "double" } }, + "experimental_cache_metrics": { + "description": "Experimental metrics to know more about caching strategies", + "type": "object", + "properties": { + "enabled": { + "description": "Enable experimental metrics", + "default": false, + "type": "boolean" + }, + "ttl": { + "description": "Potential TTL for a cache if we had one (default: 5secs)", + "default": "5s", + "type": "string" + } + }, + "additionalProperties": false + }, "resources": { "description": "Resources", "default": {}, diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs index 11e76f1bcf..c0cff8118f 100644 --- a/apollo-router/src/plugins/telemetry/config.rs +++ b/apollo-router/src/plugins/telemetry/config.rs @@ -94,6 +94,28 @@ pub(crate) struct MetricsCommon { /// Custom buckets for histograms #[serde(default = "default_buckets")] pub(crate) buckets: Vec, + /// Experimental metrics to know more about caching strategies + pub(crate) experimental_cache_metrics: ExperimentalCacheMetricsConf, +} + +#[derive(Clone, Debug, Deserialize, JsonSchema)] +#[serde(deny_unknown_fields, rename_all = "snake_case", default)] +pub(crate) struct ExperimentalCacheMetricsConf { + /// Enable experimental metrics + pub(crate) enabled: bool, + #[serde(with = "humantime_serde")] + #[schemars(with = "String")] + /// Potential TTL for a cache if we had one (default: 5secs) + pub(crate) ttl: Duration, +} + +impl Default for ExperimentalCacheMetricsConf { + fn default() -> Self { + Self { + enabled: false, + ttl: Duration::from_secs(5), + } + } } fn default_buckets() -> Vec { @@ -110,6 +132,7 @@ impl Default for MetricsCommon { service_namespace: None, resources: 
HashMap::new(), buckets: default_buckets(), + experimental_cache_metrics: ExperimentalCacheMetricsConf::default(), } } } diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index 0f46df3750..67450787e9 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -11,6 +11,7 @@ use ::tracing::field; use ::tracing::info_span; use ::tracing::Span; use axum::headers::HeaderName; +use bloomfilter::Bloom; use dashmap::DashMap; use futures::future::ready; use futures::future::BoxFuture; @@ -38,6 +39,7 @@ use opentelemetry::trace::TraceState; use opentelemetry::trace::TracerProvider; use opentelemetry::Context as OtelContext; use opentelemetry::KeyValue; +use parking_lot::Mutex; use rand::Rng; use router_bridge::planner::UsageReporting; use serde_json_bytes::json; @@ -72,6 +74,9 @@ use self::reload::NullFieldFormatter; use self::reload::SamplingFilter; use self::reload::OPENTELEMETRY_TRACER_HANDLE; use self::tracing::apollo_telemetry::APOLLO_PRIVATE_DURATION_NS; +use super::traffic_shaping::cache::hash_request; +use super::traffic_shaping::cache::hash_vary_headers; +use super::traffic_shaping::cache::REPRESENTATIONS; use crate::axum_factory::utils::REQUEST_SPAN_NAME; use crate::context::OPERATION_NAME; use crate::layers::ServiceBuilderExt; @@ -117,6 +122,7 @@ use crate::services::SubgraphRequest; use crate::services::SubgraphResponse; use crate::services::SupergraphRequest; use crate::services::SupergraphResponse; +use crate::spec::TYPENAME; use crate::tracer::TraceId; use crate::Context; use crate::ListenAddr; @@ -162,6 +168,7 @@ pub(crate) struct Telemetry { tracer_provider: Option, meter_provider: AggregateMeterProvider, + counter: Option>>, } #[derive(Debug)] @@ -244,7 +251,21 @@ impl Plugin for Telemetry { config.calculate_field_level_instrumentation_ratio()?; let mut metrics_builder = Self::create_metrics_builder(&config)?; let meter_provider = metrics_builder.meter_provider(); + let counter = config + .metrics + .as_ref() + .and_then(|m| m.common.as_ref()) + .and_then(|c| { + if c.experimental_cache_metrics.enabled { + Some(Arc::new(Mutex::new(CacheCounter::new( + c.experimental_cache_metrics.ttl, + )))) + } else { + None + } + }); let (sampling_filter_ratio, tracer_provider) = Self::create_tracer_provider(&config)?; + Ok(Telemetry { custom_endpoints: metrics_builder.custom_endpoints(), metrics_exporters: metrics_builder.exporters(), @@ -255,6 +276,7 @@ impl Plugin for Telemetry { meter_provider, sampling_filter_ratio, config: Arc::new(config), + counter, }) } @@ -477,7 +499,10 @@ impl Plugin for Telemetry { let subgraph_metrics_conf_req = self.create_subgraph_metrics_conf(name); let subgraph_metrics_conf_resp = subgraph_metrics_conf_req.clone(); let subgraph_name = ByteString::from(name); + let cache_metrics_enabled = self.counter.is_some(); + let counter = self.counter.clone(); let name = name.to_owned(); + let subgraph_name_arc = Arc::new(name.to_owned()); ServiceBuilder::new() .instrument(move |req: &SubgraphRequest| { let query = req @@ -502,7 +527,16 @@ impl Plugin for Telemetry { "apollo_private.ftv1" = field::Empty ) }) - .map_request(request_ftv1) + .map_request(move |mut req: SubgraphRequest| { + let cache_attributes = cache_metrics_enabled + .then(|| Self::get_cache_attributes(subgraph_name_arc.clone(), &mut req)) + .flatten(); + if let Some(cache_attributes) = cache_attributes { + req.context.private_entries.lock().insert(cache_attributes); + } + + request_ftv1(req) + }) 
.map_response(move |resp| store_ftv1(&subgraph_name, resp)) .map_future_with_request_data( move |sub_request: &SubgraphRequest| { @@ -510,13 +544,16 @@ impl Plugin for Telemetry { subgraph_metrics_conf_req.clone(), sub_request, ); - sub_request.context.clone() + let cache_attributes = sub_request.context.private_entries.lock().remove(); + + (sub_request.context.clone(), cache_attributes) }, - move |context: Context, + move |(context, cache_attributes): (Context, Option), f: BoxFuture<'static, Result>| { let metrics = metrics.clone(); let subgraph_attribute = subgraph_attribute.clone(); let subgraph_metrics_conf = subgraph_metrics_conf_resp.clone(); + let counter = counter.clone(); // Using Instant because it is guaranteed to be monotonically increasing. let now = Instant::now(); f.map(move |result: Result| { @@ -526,6 +563,8 @@ impl Plugin for Telemetry { subgraph_attribute, subgraph_metrics_conf, now, + counter, + cache_attributes, &result, ); result @@ -1032,6 +1071,63 @@ impl Telemetry { ) } + fn get_cache_attributes( + subgraph_name: Arc, + sub_request: &mut Request, + ) -> Option { + let body = dbg!(sub_request.subgraph_request.body_mut()); + let hashed_query = hash_request(body); + let representations = body + .variables + .get(REPRESENTATIONS) + .and_then(|value| value.as_array())?; + + let keys = extract_cache_attributes(representations).ok()?; + + Some(CacheAttributes { + subgraph_name, + headers: sub_request.subgraph_request.headers().clone(), + hashed_query: Arc::new(hashed_query), + representations: keys, + }) + } + + fn update_cache_metrics( + counter: Arc>, + sub_response: &SubgraphResponse, + cache_attributes: CacheAttributes, + ) { + let mut vary_headers = sub_response + .response + .headers() + .get_all(header::VARY) + .into_iter() + .filter_map(|val| { + val.to_str().ok().map(|v| { + v.to_string() + .split(", ") + .map(|s| s.to_string()) + .collect::>() + }) + }) + .flatten() + .collect::>(); + vary_headers.sort(); + let vary_headers = vary_headers.join(", "); + + let hashed_headers = if vary_headers.is_empty() { + Arc::default() + } else { + Arc::new(hash_vary_headers(&cache_attributes.headers)) + }; + counter.lock().record( + cache_attributes.hashed_query.clone(), + cache_attributes.subgraph_name.clone(), + hashed_headers, + cache_attributes.representations, + ); + } + fn store_subgraph_request_attributes( attribute_forward_config: Arc>, sub_request: &Request, @@ -1052,12 +1148,15 @@ impl Telemetry { .insert(SubgraphMetricsAttributes(attributes)); //.unwrap(); } + #[allow(clippy::too_many_arguments)] fn store_subgraph_response_attributes( context: &Context, metrics: BasicMetrics, subgraph_attribute: KeyValue, attribute_forward_config: Arc>, now: Instant, + counter: Option>>, + cache_attributes: Option, result: &Result, ) { let mut metric_attrs = { @@ -1088,6 +1187,21 @@ impl Telemetry { match &result { Ok(response) => { + if let Some(cache_attributes) = cache_attributes { + if let Ok(cache_control) = response + .response + .headers() + .get(header::CACHE_CONTROL) + .ok_or(()) + .and_then(|val| val.to_str().map(|v| v.to_string()).map_err(|_| ())) + { + metric_attrs.push(KeyValue::new("cache_control", cache_control)); + } + + if let Some(counter) = counter { + Self::update_cache_metrics(counter, response, cache_attributes) + } + } metric_attrs.push(KeyValue::new( "status", response.response.status().as_u16().to_string(), @@ -1554,6 +1668,113 @@ impl Telemetry { } } +#[derive(Debug, Clone)] +struct CacheAttributes { + subgraph_name: Arc, + headers: http::HeaderMap, + 
hashed_query: Arc, + // Typename + hashed_representation + representations: Vec<(Arc, Value)>, +} + +#[derive(Debug, Hash, Clone)] +struct CacheKey { + representation: Value, + typename: Arc, + query: Arc, + subgraph_name: Arc, + hashed_headers: Arc, +} + +// Get typename and hashed representation for each representations in the subgraph query +fn extract_cache_attributes( + representations: &[Value], +) -> Result, Value)>, BoxError> { + let mut res = Vec::new(); + for representation in representations { + let opt_type = representation + .as_object() + .and_then(|o| o.get(TYPENAME)) + .ok_or("missing __typename in representation")?; + let typename = opt_type.as_str().unwrap_or(""); + + res.push((Arc::new(typename.to_string()), representation.clone())); + } + Ok(res) +} + +struct CacheCounter { + primary: Bloom, + secondary: Bloom, + created_at: Instant, + ttl: Duration, +} + +impl CacheCounter { + fn new(ttl: Duration) -> Self { + Self { + primary: Self::make_filter(), + secondary: Self::make_filter(), + created_at: Instant::now(), + ttl, + } + } + + fn make_filter() -> Bloom { + // the filter is around 4kB in size (can be calculated with `Bloom::compute_bitmap_size`) + Bloom::new_for_fp_rate(10000, 0.2) + } + + fn record( + &mut self, + query: Arc, + subgraph_name: Arc, + hashed_headers: Arc, + representations: Vec<(Arc, Value)>, + ) { + if self.created_at.elapsed() >= self.ttl { + self.clear(); + } + + // typename -> (nb of cache hits, nb of entities) + let mut seen: HashMap, (usize, usize)> = HashMap::new(); + for (typename, representation) in representations { + let cache_hit = self.check(&CacheKey { + representation, + typename: typename.clone(), + query: query.clone(), + subgraph_name: subgraph_name.clone(), + hashed_headers: hashed_headers.clone(), + }); + + let seen_entry = seen.entry(typename.clone()).or_default(); + if cache_hit { + seen_entry.0 += 1; + } + seen_entry.1 += 1; + } + + for (typename, (cache_hit, total_entities)) in seen.into_iter() { + ::tracing::info!( + histogram.apollo.router.operations.entity.cache_hit = (cache_hit as f64 / total_entities as f64) * 100f64, + entity_type = %typename, + subgraph = %subgraph_name, + ); + } + } + + fn check(&mut self, key: &CacheKey) -> bool { + self.primary.check_and_set(key) || self.secondary.check(key) + } + + fn clear(&mut self) { + let secondary = std::mem::replace(&mut self.primary, Self::make_filter()); + self.secondary = secondary; + + self.created_at = Instant::now(); + } +} + fn filter_headers(headers: &HeaderMap, forward_rules: &ForwardHeaders) -> String { let headers_map = headers .iter() diff --git a/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs b/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs index 9489189891..25f260977c 100644 --- a/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs +++ b/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs @@ -847,7 +847,7 @@ mod test { use opentelemetry::Value; use prost::Message; use serde_json::json; - use crate::plugins::telemetry::apollo::{ErrorConfiguration}; + use crate::plugins::telemetry::apollo::ErrorConfiguration; use crate::plugins::telemetry::apollo_exporter::proto::reports::Trace; use crate::plugins::telemetry::apollo_exporter::proto::reports::trace::query_plan_node::{DeferNodePrimary, DeferredNode, ResponsePathElement}; use crate::plugins::telemetry::apollo_exporter::proto::reports::trace::{QueryPlanNode, Node, Error}; diff --git a/apollo-router/src/plugins/traffic_shaping/cache.rs 
b/apollo-router/src/plugins/traffic_shaping/cache.rs index f52ac67061..abb1e7031c 100644 --- a/apollo-router/src/plugins/traffic_shaping/cache.rs +++ b/apollo-router/src/plugins/traffic_shaping/cache.rs @@ -5,8 +5,10 @@ use std::time::Duration; use futures::future::BoxFuture; use futures::FutureExt; +use http::header; use serde::Deserialize; use serde::Serialize; +use serde_json_bytes::ByteString; use serde_json_bytes::Value; use sha2::Digest; use sha2::Sha256; @@ -25,6 +27,9 @@ use crate::json_ext::Object; use crate::services::subgraph; use crate::spec::TYPENAME; +const ENTITIES: &str = "_entities"; +pub(crate) const REPRESENTATIONS: &str = "representations"; + #[derive(Clone)] pub(crate) struct SubgraphCacheLayer { storage: RedisCacheStorage, @@ -83,14 +88,14 @@ where Poll::Ready(Ok(())) } - fn call(&mut self, mut request: subgraph::Request) -> Self::Future { + fn call(&mut self, request: subgraph::Request) -> Self::Future { let service = self.service.clone(); if !request .subgraph_request - .body_mut() + .body() .variables - .contains_key("representations") + .contains_key(REPRESENTATIONS) { return service.oneshot(request).boxed(); } @@ -118,9 +123,11 @@ where let body = request.subgraph_request.body_mut(); let query_hash = hash_request(body); + // TODO: compute TTL with cacheControl directive on the subgraph + let representations = body .variables - .get_mut("representations") + .get_mut(REPRESENTATIONS) .and_then(|value| value.as_array_mut()) .expect("we already checked that representations exist"); @@ -132,11 +139,11 @@ where .unwrap_or_else(|| std::iter::repeat(None).take(keys.len()).collect()); let (new_representations, mut result) = - filter_representations(representations, keys, cache_result)?; + filter_representations(&name, representations, keys, cache_result)?; if !new_representations.is_empty() { body.variables - .insert("representations", new_representations.into()); + .insert(REPRESENTATIONS, new_representations.into()); let mut response = service.oneshot(request).await?; @@ -145,7 +152,7 @@ where if let Some(mut entities) = data .as_mut() .and_then(|v| v.as_object_mut()) - .and_then(|o| o.remove("_entities")) + .and_then(|o| o.remove(ENTITIES)) { let new_entities = insert_entities_in_result( entities @@ -160,7 +167,7 @@ where data.as_mut() .and_then(|v| v.as_object_mut()) - .map(|o| o.insert("_entities", new_entities.into())); + .map(|o| o.insert(ENTITIES, new_entities.into())); response.response.body_mut().data = data; } @@ -168,7 +175,7 @@ where } else { let entities = insert_entities_in_result(&mut Vec::new(), &cache, &mut result).await?; let mut data = Object::default(); - data.insert("_entities", entities.into()); + data.insert(ENTITIES, entities.into()); Ok(subgraph::Response::builder() .data(data) @@ -178,14 +185,42 @@ where } } -fn hash_request(body: &graphql::Request) -> String { +pub(crate) fn hash_vary_headers(headers: &http::HeaderMap) -> String { + let mut digest = Sha256::new(); + + for vary_header_value in headers.get_all(header::VARY).into_iter() { + if vary_header_value == "*" { + return String::from("*"); + } else { + let header_names = match vary_header_value.to_str() { + Ok(header_val) => header_val.split(", "), + Err(_) => continue, + }; + header_names.for_each(|header_name| { + if let Some(header_value) = headers.get(header_name).and_then(|h| h.to_str().ok()) { + digest.update(header_value); + digest.update(&[0u8; 1][..]); + } + }); + } + } + + hex::encode(digest.finalize().as_slice()) +} + +pub(crate) fn hash_request(body: &mut graphql::Request) -> 
String { let mut digest = Sha256::new(); digest.update(body.query.as_deref().unwrap_or("-").as_bytes()); digest.update(&[0u8; 1][..]); digest.update(body.operation_name.as_deref().unwrap_or("-").as_bytes()); digest.update(&[0u8; 1][..]); + let repr_key = ByteString::from(REPRESENTATIONS); + // Removing the representations variable because it's already part of the cache key + let representations = body.variables.remove(&repr_key); digest.update(&serde_json::to_vec(&body.variables).unwrap()); - + if let Some(representations) = representations { + body.variables.insert(repr_key, representations); + } hex::encode(digest.finalize().as_slice()) } @@ -204,19 +239,21 @@ fn extract_cache_keys( reason: "missing __typename in representation".to_string(), })?; - let typename = opt_type.as_str().unwrap_or("-").to_string(); + let typename = opt_type.as_str().unwrap_or("-"); + + // We have to have representation because it can contains PII + let mut digest = Sha256::new(); + digest.update(serde_json::to_string(&representation).unwrap().as_bytes()); + let hashed_repr = hex::encode(digest.finalize().as_slice()); let key = format!( "subgraph.{}|{}|{}|{}", - subgraph_name, - &typename, - serde_json::to_string(&representation).unwrap(), - query_hash + subgraph_name, &typename, hashed_repr, query_hash ); representation .as_object_mut() - .map(|o| o.insert("__typename", opt_type)); + .map(|o| o.insert(TYPENAME, opt_type)); res.push(key); } Ok(res) @@ -230,6 +267,7 @@ struct IntermediateResult { // build a new list of representations without the ones we got from the cache fn filter_representations( + subgraph_name: &str, representations: &mut Vec, keys: Vec, mut cache_result: Vec>, @@ -245,7 +283,7 @@ fn filter_representations( { let opt_type = representation .as_object_mut() - .and_then(|o| o.remove("__typename")) + .and_then(|o| o.remove(TYPENAME)) .ok_or_else(|| FetchError::MalformedRequest { reason: "missing __typename in representation".to_string(), })?; @@ -257,7 +295,7 @@ fn filter_representations( representation .as_object_mut() - .map(|o| o.insert("__typename", opt_type)); + .map(|o| o.insert(TYPENAME, opt_type)); new_representations.push(representation); } else { cache_hit.entry(typename.clone()).or_default().0 += 1; @@ -270,11 +308,17 @@ fn filter_representations( } for (ty, (hit, miss)) in cache_hit { - tracing::event!( - Level::INFO, + tracing::info!( + monotonic_counter.apollo.router.operations.entity.cache = hit as u64, + entity_type = ty.as_str(), + hit = %true, + %subgraph_name + ); + tracing::info!( + monotonic_counter.apollo.router.operations.entity.cache = miss as u64, entity_type = ty.as_str(), - cache_hit = hit, - cache_miss = miss + miss = %true, + %subgraph_name ); } @@ -317,6 +361,7 @@ async fn insert_entities_in_result( } if !to_insert.is_empty() { + // TODO use insert_multiple_with_ttl cache.insert_multiple(&to_insert).await; } diff --git a/apollo-router/src/plugins/traffic_shaping/mod.rs b/apollo-router/src/plugins/traffic_shaping/mod.rs index 68b0a0d286..a85bdce659 100644 --- a/apollo-router/src/plugins/traffic_shaping/mod.rs +++ b/apollo-router/src/plugins/traffic_shaping/mod.rs @@ -6,7 +6,7 @@ //! * Compression //! * Rate limiting //! 
-mod cache; +pub(crate) mod cache; mod deduplication; pub(crate) mod rate; mod retry; @@ -379,10 +379,9 @@ impl TrafficShaping { let all_config = self.config.all.as_ref(); let subgraph_config = self.config.subgraphs.get(name); let final_config = Self::merge_config(all_config, subgraph_config); - let entity_caching = if let (Some(storage), Some(caching_config)) = ( self.storage.clone(), - subgraph_config + final_config .as_ref() .and_then(|c| c.experimental_entity_caching.as_ref()), ) { From 72bc0e10ab7dda0032c31f3619c52542011cf292 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Wed, 6 Sep 2023 14:23:56 +0200 Subject: [PATCH 34/50] remove a cloned entity from response_at_path (#3759) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit there was always one additional clone before, even when the entity was used in only one place. In local benchmarks, I could see this remove 200μs from a query --- apollo-router/src/query_planner/fetch.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/apollo-router/src/query_planner/fetch.rs b/apollo-router/src/query_planner/fetch.rs index 41e2b233e3..c7b4e7216f 100644 --- a/apollo-router/src/query_planner/fetch.rs +++ b/apollo-router/src/query_planner/fetch.rs @@ -367,11 +367,19 @@ impl FetchNode { if let Value::Array(array) = entities { let mut value = Value::default(); - for (path, entity_idx) in paths { - if let Some(entity) = array.get(entity_idx) { - let mut data = entity.clone(); - rewrites::apply_rewrites(schema, &mut data, &self.output_rewrites); - let _ = value.insert(&path, data); + for (index, mut entity) in array.into_iter().enumerate() { + rewrites::apply_rewrites(schema, &mut entity, &self.output_rewrites); + + if let Some(paths) = inverted_paths.get(&index) { + if paths.len() > 1 { + for path in &paths[1..] { + let _ = value.insert(path, entity.clone()); + } + } + + if let Some(path) = paths.first() { + let _ = value.insert(path, entity); + } } } return (value, errors); From a2d5f6f7de1df4e4d5a09fd793a1e0d049a32663 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Wed, 6 Sep 2023 15:09:04 +0200 Subject: [PATCH 35/50] reference a github discussion about GraphOS Authorization (#3755) --- apollo-router/feature_discussions.json | 4 ++-- .../tests/snapshots/lifecycle_tests__cli_config_preview.snap | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apollo-router/feature_discussions.json b/apollo-router/feature_discussions.json index 775e1b84b8..a972a3a506 100644 --- a/apollo-router/feature_discussions.json +++ b/apollo-router/feature_discussions.json @@ -6,6 +6,6 @@ "experimental_http_max_request_bytes": "https://github.com/apollographql/router/discussions/3220" }, "preview": { - "preview_directives": "https://github.com/apollographql/router/discussions/???" + "preview_directives": "https://github.com/apollographql/router/discussions/3754" } -} \ No newline at end of file +} diff --git a/apollo-router/tests/snapshots/lifecycle_tests__cli_config_preview.snap b/apollo-router/tests/snapshots/lifecycle_tests__cli_config_preview.snap index 7aea987112..9cd540f304 100644 --- a/apollo-router/tests/snapshots/lifecycle_tests__cli_config_preview.snap +++ b/apollo-router/tests/snapshots/lifecycle_tests__cli_config_preview.snap @@ -9,5 +9,5 @@ stderr: stdout: List of all preview configurations with related GitHub discussions: - - preview_directives: https://github.com/apollographql/router/discussions/??? 
+ - preview_directives: https://github.com/apollographql/router/discussions/3754 From cef15b52ca5f19044c3938d6de6f606f747ae7d0 Mon Sep 17 00:00:00 2001 From: Simon Sapin Date: Wed, 6 Sep 2023 15:36:45 +0200 Subject: [PATCH 36/50] Replace atty crate with std (#3729) The crate is unmaintained, and the standard library has equivalent functionality since Rust 1.70.0 * https://github.com/apollographql/router/security/dependabot/68 * https://doc.rust-lang.org/stable/std/io/trait.IsTerminal.html Co-authored-by: Geoffroy Couprie --- .changesets/maint_simon_is_terminal.md | 8 ++++++++ Cargo.lock | 1 - apollo-router/Cargo.toml | 1 - apollo-router/src/error.rs | 3 ++- apollo-router/src/plugins/telemetry/config.rs | 3 ++- apollo-router/src/plugins/telemetry/reload.rs | 3 ++- 6 files changed, 14 insertions(+), 5 deletions(-) create mode 100644 .changesets/maint_simon_is_terminal.md diff --git a/.changesets/maint_simon_is_terminal.md b/.changesets/maint_simon_is_terminal.md new file mode 100644 index 0000000000..7295cd62d3 --- /dev/null +++ b/.changesets/maint_simon_is_terminal.md @@ -0,0 +1,8 @@ +### Replace atty crate with std ([PR #3729](https://github.com/apollographql/router/pull/3729)) + +The crate is unmaintained, and the standard library has equivalent functionality since Rust 1.70.0 + +* https://github.com/apollographql/router/security/dependabot/68 +* https://doc.rust-lang.org/stable/std/io/trait.IsTerminal.html + +By [@SimonSapin](https://github.com/SimonSapin) in https://github.com/apollographql/router/pull/3729 diff --git a/Cargo.lock b/Cargo.lock index 212d0f17d8..2d3686a8f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -274,7 +274,6 @@ dependencies = [ "askama", "async-compression", "async-trait", - "atty", "aws-config", "aws-credential-types", "aws-sigv4", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index b9abf26a6b..1e33618bb9 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -64,7 +64,6 @@ async-compression = { version = "0.4.1", features = [ "deflate", ] } async-trait = "0.1.73" -atty = "0.2.14" axum = { version = "0.6.20", features = ["headers", "json", "original-uri"] } base64 = "0.21.2" bloomfilter = "1.0.12" diff --git a/apollo-router/src/error.rs b/apollo-router/src/error.rs index fe844c04ee..2902d50b10 100644 --- a/apollo-router/src/error.rs +++ b/apollo-router/src/error.rs @@ -1,4 +1,5 @@ //! Router errors. +use std::io::IsTerminal; use std::sync::Arc; use displaydoc::Display; @@ -553,7 +554,7 @@ impl ValidationErrors { pub(crate) fn print(&self) { if LevelFilter::current() == LevelFilter::OFF && cfg!(not(debug_assertions)) { return; - } else if atty::is(atty::Stream::Stdout) { + } else if std::io::stdout().is_terminal() { // Fancy reports for TTYs self.errors.iter().for_each(|err| { // `format!` works around https://github.com/rust-lang/rust/issues/107118 diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs index c0cff8118f..7d9418dee6 100644 --- a/apollo-router/src/plugins/telemetry/config.rs +++ b/apollo-router/src/plugins/telemetry/config.rs @@ -1,6 +1,7 @@ //! Configuration for the telemetry plugin. 
use std::collections::BTreeMap; use std::env; +use std::io::IsTerminal; use axum::headers::HeaderName; use opentelemetry::sdk::resource::EnvResourceDetector; @@ -300,7 +301,7 @@ pub(crate) enum LoggingFormat { impl Default for LoggingFormat { fn default() -> Self { - if atty::is(atty::Stream::Stdout) { + if std::io::stdout().is_terminal() { Self::Pretty } else { Self::Json diff --git a/apollo-router/src/plugins/telemetry/reload.rs b/apollo-router/src/plugins/telemetry/reload.rs index 064763405d..96296341e5 100644 --- a/apollo-router/src/plugins/telemetry/reload.rs +++ b/apollo-router/src/plugins/telemetry/reload.rs @@ -1,3 +1,4 @@ +use std::io::IsTerminal; use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; @@ -81,7 +82,7 @@ pub(crate) fn init_telemetry(log_level: &str) -> Result<()> { .with_filter(SamplingFilter::new()); // We choose json or plain based on tty - let fmt = if atty::is(atty::Stream::Stdout) { + let fmt = if std::io::stdout().is_terminal() { tracing_subscriber::fmt::Layer::new() .event_format(FilteringFormatter::new( TextFormatter::new() From 99a26db59dc68fa95bdf18efdffadbb99c97624b Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Thu, 7 Sep 2023 08:00:06 +0100 Subject: [PATCH 37/50] Add apollo.router.id to otlp metrics metadata (#3764) Currently apollo metrics may have the same metadata between multiple routers. This means that we can't disambiguate when multiple routers are sending data. Add a random UUID to the metadata. This has no user facing impact. Note: there is no unit test or integration test for this. We need to take a broader look at how we do telemetry testing, and the solution should probably involve otel collector. However that is beyond the scope of this ticket. **Checklist** Complete the checklist (and note appropriate exceptions) before a final PR is raised. - [ ] Changes are compatible[^1] - [ ] Documentation[^2] completed - [ ] Performance impact assessed and acceptable - Tests added and passing[^3] - [ ] Unit Tests - [ ] Integration Tests - [ ] Manual Tests **Exceptions** *Note any exceptions here* **Notes** [^1]. It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this. [^2]. Configuration is an important part of many changes. Where applicable please try to document configuration examples. [^3]. Tick whichever testing boxes are applicable. If you are adding Manual Tests: - please document the manual testing (extensively) in the Exceptions. - please raise a separate issue to automate the test and label it (or ask for it to be labeled) as `manual test` --------- Co-authored-by: bryn Co-authored-by: Brandt Kinzle <114622981+bkinzle@users.noreply.github.com> --- apollo-router/src/plugins/telemetry/metrics/apollo.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/apollo-router/src/plugins/telemetry/metrics/apollo.rs b/apollo-router/src/plugins/telemetry/metrics/apollo.rs index ef10ca8006..6424705c24 100644 --- a/apollo-router/src/plugins/telemetry/metrics/apollo.rs +++ b/apollo-router/src/plugins/telemetry/metrics/apollo.rs @@ -1,6 +1,7 @@ //! 
Apollo metrics use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; +use std::sync::OnceLock; use std::time::Duration; use opentelemetry::sdk::export::metrics::aggregation; @@ -12,6 +13,7 @@ use sys_info::hostname; use tonic::metadata::MetadataMap; use tower::BoxError; use url::Url; +use uuid::Uuid; use crate::plugins::telemetry::apollo::Config; use crate::plugins::telemetry::apollo_exporter::get_uname; @@ -31,6 +33,9 @@ fn default_buckets() -> Vec { ] } +// Random unique UUID for the Router. This doesn't actually identify the router, it just allows disambiguation between multiple routers with the same metadata. +static ROUTER_ID: OnceLock = OnceLock::new(); + impl MetricsConfigurator for Config { fn apply( &self, @@ -105,6 +110,10 @@ impl Config { opentelemetry::runtime::Tokio, ) .with_resource(Resource::new([ + KeyValue::new( + "apollo.router.id", + ROUTER_ID.get_or_init(Uuid::new_v4).to_string(), + ), KeyValue::new("apollo.graph.ref", reference.to_string()), KeyValue::new("apollo.schema.id", schema_id.to_string()), KeyValue::new( From 16a3c8d7c51cbb0fbbb04f8fdda8ce86cd95863c Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Thu, 7 Sep 2023 11:00:55 +0200 Subject: [PATCH 38/50] fix(subscription): force the deduplication to be enabled by default as it's documented (#3773) `subscription.enable_deduplication` was documented to be `true` by default but it wasn't really the case. --------- Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> Co-authored-by: Jeremy Lempereur --- ...njjj_subscription_default_configuration.md | 8 +++++ ...nfiguration__tests__schema_generation.snap | 6 ++-- apollo-router/src/plugins/subscription.rs | 29 +++++++++++++++---- 3 files changed, 33 insertions(+), 10 deletions(-) create mode 100644 .changesets/fix_bnjjj_subscription_default_configuration.md diff --git a/.changesets/fix_bnjjj_subscription_default_configuration.md b/.changesets/fix_bnjjj_subscription_default_configuration.md new file mode 100644 index 0000000000..2c4a5c207a --- /dev/null +++ b/.changesets/fix_bnjjj_subscription_default_configuration.md @@ -0,0 +1,8 @@ +### fix(subscription): force the deduplication to be enabled by default as it's documented ([PR #3773](https://github.com/apollographql/router/pull/3773)) + +A bug was introduced in router v1.25.0 which caused [subscription deduplication](https://www.apollographql.com/docs/router/executing-operations/subscription-support#subscription-deduplication) to be disabled by default. +As documented, the router will enable deduplication by default, providing you with subscriptions that scale. + +Should you decide to disable it, you can still explicitly set `enable_deduplication` to `false`. 
+ +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/3773 diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index ff9123ab31..14ab198d54 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -1641,17 +1641,15 @@ expression: "&schema" "subscription": { "description": "Subscriptions configuration", "type": "object", - "required": [ - "enabled" - ], "properties": { "enable_deduplication": { "description": "Enable the deduplication of subscription (for example if we detect the exact same request to subgraph we won't open a new websocket to the subgraph in passthrough mode) (default: true)", - "default": false, + "default": true, "type": "boolean" }, "enabled": { "description": "Enable subscription", + "default": true, "type": "boolean" }, "max_opened_subscriptions": { diff --git a/apollo-router/src/plugins/subscription.rs b/apollo-router/src/plugins/subscription.rs index 850f584b60..afd7e5a6d1 100644 --- a/apollo-router/src/plugins/subscription.rs +++ b/apollo-router/src/plugins/subscription.rs @@ -56,31 +56,32 @@ pub(crate) struct Subscription { /// Subscriptions configuration #[derive(Debug, Clone, Deserialize, Serialize, JsonSchema)] -#[serde(deny_unknown_fields)] +#[serde(deny_unknown_fields, default)] pub(crate) struct SubscriptionConfig { /// Enable subscription pub(crate) enabled: bool, /// Select a subscription mode (callback or passthrough) - #[serde(default)] pub(crate) mode: SubscriptionModeConfig, /// Enable the deduplication of subscription (for example if we detect the exact same request to subgraph we won't open a new websocket to the subgraph in passthrough mode) /// (default: true) - #[serde(default)] + #[serde(default = "enable_deduplication_default")] pub(crate) enable_deduplication: bool, /// This is a limit to only have maximum X opened subscriptions at the same time. By default if it's not set there is no limit. 
- #[serde(default)] pub(crate) max_opened_subscriptions: Option, /// It represent the capacity of the in memory queue to know how many events we can keep in a buffer - #[serde(default)] pub(crate) queue_capacity: Option, } +fn enable_deduplication_default() -> bool { + true +} + impl Default for SubscriptionConfig { fn default() -> Self { Self { enabled: true, mode: Default::default(), - enable_deduplication: true, + enable_deduplication: enable_deduplication_default(), max_opened_subscriptions: None, queue_capacity: None, } @@ -1268,6 +1269,22 @@ mod tests { let subgraph_cfg = config_without_mode.mode.get_subgraph_config("test"); assert_eq!(subgraph_cfg, None); + + let sub_config: SubscriptionConfig = serde_json::from_value(serde_json::json!({ + "mode": { + "preview_callback": { + "public_url": "http://localhost:4000", + "path": "/subscription/callback", + "subgraphs": ["test"] + } + } + })) + .unwrap(); + + assert!(sub_config.enabled); + assert!(sub_config.enable_deduplication); + assert!(sub_config.max_opened_subscriptions.is_none()); + assert!(sub_config.queue_capacity.is_none()); } } From c9af85d7f1e7fd867272bcb7906c58592959532f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 7 Sep 2023 09:02:37 +0000 Subject: [PATCH 39/50] fix(deps): update rust crate bytes to 1.5.0 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2d3686a8f1..f203e3022b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1201,9 +1201,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" [[package]] name = "bytes-utils" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 1e33618bb9..50c7235249 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -68,7 +68,7 @@ axum = { version = "0.6.20", features = ["headers", "json", "original-uri"] } base64 = "0.21.2" bloomfilter = "1.0.12" buildstructor = "0.5.3" -bytes = "1.4.0" +bytes = "1.5.0" clap = { version = "4.4.2", default-features = false, features = [ "env", "derive", From 03015a752db957a3131f174f2bf77d057df7a92c Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Thu, 7 Sep 2023 12:33:18 +0000 Subject: [PATCH 40/50] prep release: v1.30.0-alpha.0 --- Cargo.lock | 6 ++-- apollo-router-benchmarks/Cargo.toml | 2 +- apollo-router-scaffold/Cargo.toml | 2 +- .../templates/base/Cargo.toml | 2 +- .../templates/base/xtask/Cargo.toml | 2 +- apollo-router/Cargo.toml | 2 +- .../tracing/docker-compose.datadog.yml | 2 +- dockerfiles/tracing/docker-compose.jaeger.yml | 2 +- dockerfiles/tracing/docker-compose.zipkin.yml | 2 +- docs/source/containerization/docker.mdx | 2 +- docs/source/containerization/kubernetes.mdx | 28 +++++++++---------- helm/chart/router/Chart.yaml | 4 +-- helm/chart/router/README.md | 6 ++-- licenses.html | 22 +++++++++++++-- scripts/install.sh | 2 +- 15 files changed, 51 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f203e3022b..f5c916d3be 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -263,7 +263,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.29.1" +version = "1.30.0-alpha.0" dependencies = [ "access-json", 
"anyhow", @@ -409,7 +409,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.29.1" +version = "1.30.0-alpha.0" dependencies = [ "apollo-parser 0.6.1", "apollo-router", @@ -425,7 +425,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.29.1" +version = "1.30.0-alpha.0" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index b0a6f0cf9e..77682a342f 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.29.1" +version = "1.30.0-alpha.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index 9a6031ec2d..b0ebc8ca52 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.29.1" +version = "1.30.0-alpha.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index 0181336ac6..d0f7f03120 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.29.1" +apollo-router = "1.30.0-alpha.0" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index 080ab782cb..2e8cab7b02 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.29.1" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.30.0-alpha.0" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 50c7235249..872d45016c 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.29.1" +version = "1.30.0-alpha.0" authors = ["Apollo Graph, Inc. 
"] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index 512b2ddceb..d0198984eb 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.29.1 + image: ghcr.io/apollographql/router:v1.30.0-alpha.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index e2053883b7..d94d57403c 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.29.1 + image: ghcr.io/apollographql/router:v1.30.0-alpha.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index 89539bc852..976e6a048c 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.29.1 + image: ghcr.io/apollographql/router:v1.30.0-alpha.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/docs/source/containerization/docker.mdx b/docs/source/containerization/docker.mdx index ebcb148767..4db3611508 100644 --- a/docs/source/containerization/docker.mdx +++ b/docs/source/containerization/docker.mdx @@ -11,7 +11,7 @@ The default behaviour of the router images is suitable for a quickstart or devel Note: The [docker documentation](https://docs.docker.com/engine/reference/run/) for the run command may be helpful when reading through the examples. -Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.29.1` +Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.30.0-alpha.0` ## Override the configuration diff --git a/docs/source/containerization/kubernetes.mdx b/docs/source/containerization/kubernetes.mdx index c61cdea299..259f19679c 100644 --- a/docs/source/containerization/kubernetes.mdx +++ b/docs/source/containerization/kubernetes.mdx @@ -13,7 +13,7 @@ import { Link } from 'gatsby'; [Helm](https://helm.sh) is the package manager for kubernetes. -There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.29.1/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes. +There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.30.0-alpha.0/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes. 
In both the following examples, we are using helm to install the router: - into namespace "router-deploy" (create namespace if it doesn't exist) @@ -64,10 +64,10 @@ kind: ServiceAccount metadata: name: release-name-router labels: - helm.sh/chart: router-1.29.1 + helm.sh/chart: router-1.30.0-alpha.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.1" + app.kubernetes.io/version: "v1.30.0-alpha.0" app.kubernetes.io/managed-by: Helm --- # Source: router/templates/secret.yaml @@ -76,10 +76,10 @@ kind: Secret metadata: name: "release-name-router" labels: - helm.sh/chart: router-1.29.1 + helm.sh/chart: router-1.30.0-alpha.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.1" + app.kubernetes.io/version: "v1.30.0-alpha.0" app.kubernetes.io/managed-by: Helm data: managedFederationApiKey: "UkVEQUNURUQ=" @@ -90,10 +90,10 @@ kind: ConfigMap metadata: name: release-name-router labels: - helm.sh/chart: router-1.29.1 + helm.sh/chart: router-1.30.0-alpha.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.1" + app.kubernetes.io/version: "v1.30.0-alpha.0" app.kubernetes.io/managed-by: Helm data: configuration.yaml: | @@ -117,10 +117,10 @@ kind: Service metadata: name: release-name-router labels: - helm.sh/chart: router-1.29.1 + helm.sh/chart: router-1.30.0-alpha.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.1" + app.kubernetes.io/version: "v1.30.0-alpha.0" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -143,10 +143,10 @@ kind: Deployment metadata: name: release-name-router labels: - helm.sh/chart: router-1.29.1 + helm.sh/chart: router-1.30.0-alpha.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.1" + app.kubernetes.io/version: "v1.30.0-alpha.0" app.kubernetes.io/managed-by: Helm annotations: @@ -174,7 +174,7 @@ spec: - name: router securityContext: {} - image: "ghcr.io/apollographql/router:v1.29.1" + image: "ghcr.io/apollographql/router:v1.30.0-alpha.0" imagePullPolicy: IfNotPresent args: - --hot-reload @@ -226,10 +226,10 @@ kind: Pod metadata: name: "release-name-router-test-connection" labels: - helm.sh/chart: router-1.29.1 + helm.sh/chart: router-1.30.0-alpha.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.1" + app.kubernetes.io/version: "v1.30.0-alpha.0" app.kubernetes.io/managed-by: Helm annotations: "helm.sh/hook": test diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index 69bdb630a8..634a6608b4 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.29.1 +version: 1.30.0-alpha.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. 
-appVersion: "v1.29.1" \ No newline at end of file +appVersion: "v1.30.0-alpha.0" \ No newline at end of file diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index 983969d7a9..555a17c3c4 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.29.1](https://img.shields.io/badge/Version-1.29.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.29.1](https://img.shields.io/badge/AppVersion-v1.29.1-informational?style=flat-square) +![Version: 1.30.0-alpha.0](https://img.shields.io/badge/Version-1.30.0--alpha.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.30.0-alpha.0](https://img.shields.io/badge/AppVersion-v1.30.0--alpha.0-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.29.1 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.30.0-alpha.0 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.29.1 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.29.1 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.30.0-alpha.0 --values my-values.yaml ``` _See [configuration](#configuration) below._ diff --git a/licenses.html b/licenses.html index bbaf4cfa5a..f5fdd07f61 100644 --- a/licenses.html +++ b/licenses.html @@ -47,7 +47,7 @@

Overview of licenses:

  • MIT License (96)
  • Apache License 2.0 (62)
  • BSD 3-Clause "New" or "Revised" License (10)
  • -
  • ISC License (9)
  • +
  • ISC License (10)
  • Creative Commons Zero v1.0 Universal (2)
  • Elastic License 2.0 (2)
  • Mozilla Public License 2.0 (2)
  • @@ -10724,6 +10724,7 @@

    Used by:

    Apache License 2.0

    Used by:

    @@ -11163,6 +11164,7 @@

    Used by:

  • md5
  • num-cmp
  • rhai_codegen
  • +
  • siphasher
  • thrift
  • try_match_inner
  • try_match_inner
  • @@ -12367,6 +12369,22 @@

    Used by:

    The files under third-party/chromium are licensed as described in third-party/chromium/LICENSE. + + +
  • +

    ISC License

    +

    Used by:

    + +
    ISC License:
    +
    +Copyright (c) 2004-2010 by Internet Systems Consortium, Inc. ("ISC")
    +Copyright (c) 1995-2003 by Internet Software Consortium
    +
    +Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies.
    +
    +THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     
  • @@ -15125,11 +15143,9 @@

    Used by:

  • aho-corasick
  • byteorder
  • globset
  • -
  • memchr
  • regex-automata
  • same-file
  • termcolor
  • -
  • walkdir
  • winapi-util
  • This project is dual-licensed under the Unlicense and MIT licenses.
    diff --git a/scripts/install.sh b/scripts/install.sh
    index 47dc365d05..235e23131f 100755
    --- a/scripts/install.sh
    +++ b/scripts/install.sh
    @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa
     
     # Router version defined in apollo-router's Cargo.toml
     # Note: Change this line manually during the release steps.
    -PACKAGE_VERSION="v1.29.1"
    +PACKAGE_VERSION="v1.30.0-alpha.0"
     
     download_binary() {
         downloader --check
    
    From 3fdb61d1284bacfe7c9d5eb4e71541da75678f0d Mon Sep 17 00:00:00 2001
    From: Geoffroy Couprie 
    Date: Thu, 7 Sep 2023 16:00:33 +0200
    Subject: [PATCH 41/50] remove the rhai JSON functions (#3782)
    
We need to make the Rhai API more consistent.
    ---
     .../feat_garypen_2278_rhai_router_service.md  |  9 -------
     apollo-router/src/plugins/rhai/engine.rs      | 27 -------------------
     2 files changed, 36 deletions(-)
    
    diff --git a/.changesets/feat_garypen_2278_rhai_router_service.md b/.changesets/feat_garypen_2278_rhai_router_service.md
    index c105892e4a..526e802477 100644
    --- a/.changesets/feat_garypen_2278_rhai_router_service.md
    +++ b/.changesets/feat_garypen_2278_rhai_router_service.md
    @@ -31,13 +31,4 @@ fn process_response(response) {
     }
     ```
     
    -This PR also introduces two new Rhai functions:
    -
    -```rust
    -json_encode(Object)
    -json_decode(String) -> Object
    -
    -```
    -Which will respectively encode a `Rhai` Object or decode a JSON string into a `Rhai` Object. These functions may be helpful when dealing with String bodies which represent encoded JSON objects.
    -
     By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/3234
    diff --git a/apollo-router/src/plugins/rhai/engine.rs b/apollo-router/src/plugins/rhai/engine.rs
    index ab1dd67194..b4f55d1653 100644
    --- a/apollo-router/src/plugins/rhai/engine.rs
    +++ b/apollo-router/src/plugins/rhai/engine.rs
    @@ -281,32 +281,6 @@ mod router_header_map {
         }
     }
     
    -#[export_module]
    -mod router_json {
    -    pub(crate) type Object = crate::json_ext::Object;
    -    pub(crate) type Value = crate::json_ext::Value;
    -
    -    #[rhai_fn(name = "to_string", pure)]
    -    pub(crate) fn object_to_string(x: &mut Object) -> String {
    -        format!("{x:?}")
    -    }
    -
    -    #[rhai_fn(name = "to_string", pure)]
    -    pub(crate) fn value_to_string(x: &mut Value) -> String {
    -        format!("{x:?}")
    -    }
    -
    -    #[rhai_fn(pure, return_raw)]
-    pub(crate) fn json_encode(input: &mut Dynamic) -> Result<String, Box<EvalAltResult>> {
    -        serde_json::to_string(input).map_err(|e| e.to_string().into())
    -    }
    -
    -    #[rhai_fn(pure, return_raw)]
    -    pub(crate) fn json_decode(input: &mut ImmutableString) -> Result> {
    -        serde_json::from_str(input).map_err(|e| e.to_string().into())
    -    }
    -}
    -
     #[export_module]
     mod router_context {
         pub(crate) type Context = crate::Context;
    @@ -1487,7 +1461,6 @@ impl Rhai {
             let mut module = exported_module!(router_plugin);
             combine_with_exported_module!(&mut module, "header", router_header_map);
             combine_with_exported_module!(&mut module, "method", router_method);
    -        combine_with_exported_module!(&mut module, "json", router_json);
             combine_with_exported_module!(&mut module, "context", router_context);
     
             let base64_module = exported_module!(router_base64);
    
    From b6164b38791fbd0178de37ae83d9b38e54dab2a5 Mon Sep 17 00:00:00 2001
    From: =?UTF-8?q?Ren=C3=A9e?= 
    Date: Fri, 8 Sep 2023 15:03:07 +0200
    Subject: [PATCH 42/50] Add logs and extend metrics for
     `graphql_validation_mode: both` (#3674)
    
    This adds logging for query validation errors with either Rust or JS
    when there is a mismatch, i.e. one of them validates but the other does
    not. In other cases we are not really interested in the specific error
    (it will just go back to the user), so we don't need to log there.
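
For reference, here is a condensed, runnable sketch of the mismatch
classification described above. This is not router code: the real logic is
the `compare_validation_errors` helper in the bridge_query_planner.rs hunk
further down, and the function and label names here are illustrative only.

```rust
/// `js_rejected`: the JS query planner reported a validation error.
/// `rust_error`: the validation error reported by apollo-rs, if any.
fn classify(js_rejected: bool, rust_error: Option<&str>) -> &'static str {
    match (js_rejected, rust_error) {
        // apollo-rs rejected the operation but the JS planner accepted it.
        (false, Some(_)) => "false positive: log the Rust error",
        // The JS planner rejected the operation but apollo-rs accepted it.
        (true, None) => "false negative: log the JS error",
        // Both implementations agree; only the comparison metric is emitted.
        _ => "match",
    }
}

fn main() {
    assert_eq!(classify(false, Some("unknown field")), "false positive: log the Rust error");
    assert_eq!(classify(true, None), "false negative: log the JS error");
    assert_eq!(classify(true, Some("unknown field")), "match");
    assert_eq!(classify(false, None), "match");
}
```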
    
    To log the Rust validation error well, I now store the ApolloDiagnostics
    that were produced on `Query{}`. `Query` is serializable for caching,
    but ApolloDiagnostic is not. Here I just skipped serializing
    `ApolloDiagnostic` so if `Query` is loaded from cache, it does not have
    the validation error stored. I'm not sure this is the right thing to do.
    The ApolloDiagnostics are later used after query planning (which may
produce a JS validation error). So this is only correct if we can safely
assume that only valid Query instances end up in the cache. Otherwise we
might get spurious error logs from this.
    - [ ] So is that a safe assumption? Reading the CachingQueryPlanner
    implementation I think it does only store errors (then it's not a
    `Query` instance) and fully successful planning (then it has run both
    Rust and JS validation already). So it looks fine, but it could be a bit
    brittle to rely on this.
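
To make the caching caveat concrete, here is a minimal, self-contained
sketch (not router code; the real field lives on `Query` in the
spec/query.rs hunk below, and the names here are illustrative). It only
assumes `serde` with the `derive` feature and `serde_json`, both already
dependencies of this repo: a `#[serde(skip)]` field is dropped on
serialization and comes back as its `Default` on deserialization, so a
cache round-trip silently loses the stored validation error.

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
struct CachedQuery {
    string: String,
    // Stand-in for the non-serializable diagnostics stored on `Query`.
    #[serde(skip)]
    validation_error: Option<String>,
}

fn main() {
    let query = CachedQuery {
        string: "{ me { id } }".to_string(),
        validation_error: Some("example validation error".to_string()),
    };
    // Simulate a cache round-trip through JSON.
    let cached = serde_json::to_string(&query).unwrap();
    let restored: CachedQuery = serde_json::from_str(&cached).unwrap();
    // The skipped field falls back to its Default: the error is gone.
    assert!(restored.validation_error.is_none());
}
```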
    
    I also simplified the validation error printing which
    - [x] depends on https://github.com/apollographql/apollo-rs/pull/630.
    - [x] and on https://github.com/apollographql/router/pull/3675
    
    
    
    **Checklist**
    
    Complete the checklist (and note appropriate exceptions) before a final
    PR is raised.
    
    - [ ] Changes are compatible[^1]
    - [ ] Documentation[^2] completed
    - [ ] Performance impact assessed and acceptable
    - Tests added and passing[^3]
        - [ ] Unit Tests
        - [ ] Integration Tests
        - [ ] Manual Tests
    
    **Exceptions**
    
    *Note any exceptions here*
    
    **Notes**
    
    [^1]. It may be appropriate to bring upcoming changes to the attention
    of other (impacted) groups. Please endeavour to do this before seeking
    PR approval. The mechanism for doing this will vary considerably, so use
    your judgement as to how and when to do this.
    [^2]. Configuration is an important part of many changes. Where
    applicable please try to document configuration examples.
    [^3]. Tick whichever testing boxes are applicable. If you are adding
    Manual Tests:
    - please document the manual testing (extensively) in the Exceptions.
    - please raise a separate issue to automate the test and label it (or
    ask for it to be labeled) as `manual test`
    ---
     apollo-router/src/error.rs                    | 55 +++++--------
     .../src/query_planner/bridge_query_planner.rs | 80 +++++++++++++------
     apollo-router/src/spec/query.rs               | 15 ++--
     apollo-router/src/spec/schema.rs              |  1 -
     4 files changed, 86 insertions(+), 65 deletions(-)
    
    diff --git a/apollo-router/src/error.rs b/apollo-router/src/error.rs
    index 2902d50b10..3ca86dc198 100644
    --- a/apollo-router/src/error.rs
    +++ b/apollo-router/src/error.rs
    @@ -1,5 +1,4 @@
     //! Router errors.
    -use std::io::IsTerminal;
     use std::sync::Arc;
     
     use displaydoc::Display;
    @@ -11,7 +10,6 @@ use serde::Deserialize;
     use serde::Serialize;
     use thiserror::Error;
     use tokio::task::JoinError;
    -use tracing::level_filters::LevelFilter;
     
     pub(crate) use crate::configuration::ConfigurationError;
     pub(crate) use crate::graphql::Error;
    @@ -438,6 +436,14 @@ impl From for QueryPlannerError {
         }
     }
     
+impl From<ValidationErrors> for QueryPlannerError {
    +    fn from(err: ValidationErrors) -> Self {
    +        // This needs to be serializable, so eagerly stringify the non-serializable
    +        // ApolloDiagnostics.
    +        QueryPlannerError::SpecError(SpecError::ValidationError(err.to_string()))
    +    }
    +}
    +
 impl From<router_bridge::error::Error> for QueryPlannerError {
         fn from(error: router_bridge::error::Error) -> Self {
             QueryPlannerError::RouterBridgeError(error)
    @@ -503,9 +509,9 @@ pub(crate) enum SchemaError {
         UrlParse(String, http::uri::InvalidUri),
         /// Could not find an URL for subgraph {0}
         MissingSubgraphUrl(String),
    -    /// GraphQL parser error(s).
    +    /// GraphQL parser error: {0}
         Parse(ParseErrors),
    -    /// GraphQL parser or validation error(s).
    +    /// GraphQL validation error: {0}
         Validate(ValidationErrors),
         /// Api error(s): {0}
         Api(String),
    @@ -520,11 +526,16 @@ pub(crate) struct ParseErrors {
     impl std::fmt::Display for ParseErrors {
         fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
             let mut errors = self.errors.iter();
    -        if let Some(error) = errors.next() {
    -            write!(f, "{}", error.message())?;
    +        for (i, error) in errors.by_ref().take(5).enumerate() {
    +            if i > 0 {
    +                f.write_str("\n")?;
    +            }
    +            // TODO(@goto-bus-stop): display line/column once that is exposed from apollo-rs
    +            write!(f, "at index {}: {}", error.index(), error.message())?;
             }
    -        for error in errors {
    -            write!(f, "\n{}", error.message())?;
    +        let remaining = errors.count();
    +        if remaining > 0 {
    +            write!(f, "\n...and {remaining} other errors")?;
             }
             Ok(())
         }
    @@ -540,39 +551,15 @@ impl std::fmt::Display for ValidationErrors {
         fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
             let mut errors = self.errors.iter();
             if let Some(error) = errors.next() {
    -            write!(f, "{}", error.data)?;
    +            write!(f, "at index {}: {}", error.location.offset(), error.data)?;
             }
             for error in errors {
    -            write!(f, "\n{}", error.data)?;
    +            write!(f, "\nat index {}: {}", error.location.offset(), error.data)?;
             }
             Ok(())
         }
     }
     
    -impl ValidationErrors {
    -    #[allow(clippy::needless_return)]
    -    pub(crate) fn print(&self) {
    -        if LevelFilter::current() == LevelFilter::OFF && cfg!(not(debug_assertions)) {
    -            return;
    -        } else if std::io::stdout().is_terminal() {
    -            // Fancy reports for TTYs
    -            self.errors.iter().for_each(|err| {
    -                // `format!` works around https://github.com/rust-lang/rust/issues/107118
    -                // to test the panic from https://github.com/apollographql/router/issues/2269
    -                #[allow(clippy::format_in_format_args)]
    -                {
    -                    println!("{}", format!("{err}"));
    -                }
    -            });
    -        } else {
    -            // Best effort to display errors
    -            self.errors.iter().for_each(|diag| {
    -                println!("{}", diag.data);
    -            });
    -        };
    -    }
    -}
    -
     #[cfg(test)]
     mod tests {
         use super::*;
    diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs
    index 7805d127f9..4a677aaad1 100644
    --- a/apollo-router/src/query_planner/bridge_query_planner.rs
    +++ b/apollo-router/src/query_planner/bridge_query_planner.rs
    @@ -275,6 +275,50 @@ impl BridgeQueryPlanner {
         operation: Option<String>,
         selections: Query,
     ) -> Result<QueryPlannerContent, QueryPlannerError> {
    +        /// Compare errors from graphql-js and apollo-rs validation, and produce metrics on
    +        /// whether they had the same result.
    +        ///
    +        /// The result isn't inspected deeply: it only checks validation success/failure.
    +        fn compare_validation_errors(
    +            js_validation_error: Option<&router_bridge::planner::PlanErrors>,
    +            rs_validation_error: Option<&crate::error::ValidationErrors>,
    +        ) {
    +            let is_validation_error = js_validation_error
    +                .map_or(false, |js| js.errors.iter().all(|err| err.validation_error));
    +            match (is_validation_error, rs_validation_error) {
    +                (false, Some(validation_error)) => {
    +                    tracing::warn!(
    +                        monotonic_counter.apollo.router.validation = 1u64,
    +                        validation.source = VALIDATION_SOURCE_OPERATION,
    +                        validation.result = VALIDATION_FALSE_POSITIVE,
    +                        "validation mismatch: JS query planner did not report query validation error, but apollo-rs did"
    +                    );
    +                    tracing::warn!(
    +                        "validation mismatch: Rust validation reported: {validation_error}"
    +                    );
    +                }
    +                (true, None) => {
    +                    tracing::warn!(
    +                        monotonic_counter.apollo.router.validation = 1u64,
    +                        validation.source = VALIDATION_SOURCE_OPERATION,
    +                        validation.result = VALIDATION_FALSE_NEGATIVE,
    +                        "validation mismatch: apollo-rs did not report query validation error, but JS query planner did"
    +                    );
    +                    tracing::warn!(
    +                        "validation mismatch: JS validation reported: {}",
    +                        // Unwrapping is safe because `is_validation_error` is true
    +                        js_validation_error.unwrap(),
    +                    );
    +                }
    +                // if JS and Rust implementations agree, we return the JS result for now.
    +                _ => tracing::info!(
    +                    monotonic_counter.apollo.router.validation = 1u64,
    +                    validation.source = VALIDATION_SOURCE_OPERATION,
    +                    validation.result = VALIDATION_MATCH,
    +                ),
    +            }
    +        }
    +
             let planner_result = self
                 .planner
                 .plan(filtered_query.clone(), operation.clone())
    @@ -282,35 +326,23 @@ impl BridgeQueryPlanner {
                 .map_err(QueryPlannerError::RouterBridgeError)?
                 .into_result()
                 .map_err(|err| {
    -                let is_validation_error = err.errors.iter().all(|err| err.validation_error);
    -                match (is_validation_error, &selections.validation_error) {
    -                    (false, Some(_)) => {
    -                        tracing::warn!(
    -                            monotonic_counter.apollo.router.validation = 1u64,
    -                            validation.source = VALIDATION_SOURCE_OPERATION,
    -                            validation.result = VALIDATION_FALSE_POSITIVE,
    -                            "validation mismatch: JS query planner did not report query validation error, but apollo-rs did"
    -                        );
    -                    }
    -                    (true, None) => {
    -                        tracing::warn!(
    -                            monotonic_counter.apollo.router.validation = 1u64,
    -                            validation.source = VALIDATION_SOURCE_OPERATION,
    -                            validation.result = VALIDATION_FALSE_NEGATIVE,
    -                            "validation mismatch: apollo-rs did not report query validation error, but JS query planner did"
    -                        );
    -                    }
    -                    // if JS and Rust implementations agree, we return the JS result for now.
    -                    _ => tracing::info!(
    -                            monotonic_counter.apollo.router.validation = 1u64,
    -                            validation.source = VALIDATION_SOURCE_OPERATION,
    -                            validation.result = VALIDATION_MATCH,
    -                    ),
    +                if matches!(
    +                    self.configuration.experimental_graphql_validation_mode,
    +                    GraphQLValidationMode::Both
    +                ) {
    +                    compare_validation_errors(Some(&err), selections.validation_error.as_ref());
                     }
     
                     QueryPlannerError::from(err)
                 })?;
     
    +        if matches!(
    +            self.configuration.experimental_graphql_validation_mode,
    +            GraphQLValidationMode::Both
    +        ) {
    +            compare_validation_errors(None, selections.validation_error.as_ref());
    +        }
    +
             // the `statsReportKey` field should match the original query instead of the filtered query, to index them all under the same query
             let operation_signature = if original_query != filtered_query {
                 Some(
    diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs
    index 6f6b5e02d3..da5b66d4c3 100644
    --- a/apollo-router/src/spec/query.rs
    +++ b/apollo-router/src/spec/query.rs
    @@ -69,9 +69,12 @@ pub(crate) struct Query {
         pub(crate) is_original: bool,
         /// Validation errors, used for comparison with the JS implementation.
         ///
    +    /// `ValidationErrors` is not serde-serializable. If this comes from cache,
    +    /// the plan ought also to be cached, so we should not need this value anyways.
         /// XXX(@goto-bus-stop): Remove when only Rust validation is used
         #[derivative(PartialEq = "ignore", Hash = "ignore")]
    -    pub(crate) validation_error: Option,
    +    #[serde(skip)]
+    pub(crate) validation_error: Option<ValidationErrors>,
     }
     
     #[derive(Debug, Serialize, Deserialize)]
    @@ -318,7 +321,10 @@ impl Query {
         }
     
         /// Check for validation errors in a query in the compiler.
    -    pub(crate) fn validate_query(compiler: &ApolloCompiler, id: FileId) -> Result<(), SpecError> {
    +    pub(crate) fn validate_query(
    +        compiler: &ApolloCompiler,
    +        id: FileId,
    +    ) -> Result<(), ValidationErrors> {
             // Bail out on validation errors, only if the input is expected to be valid
             let diagnostics = compiler.db.validate_executable(id);
             let errors = diagnostics
    @@ -330,10 +336,7 @@ impl Query {
                 return Ok(());
             }
     
    -        let errors = ValidationErrors { errors };
    -        errors.print();
    -
    -        Err(SpecError::ValidationError(errors.to_string()))
    +        Err(ValidationErrors { errors })
         }
     
         /// Extract serializable data structures from the apollo-compiler HIR.
    diff --git a/apollo-router/src/spec/schema.rs b/apollo-router/src/spec/schema.rs
    index 892883339b..32e955b786 100644
    --- a/apollo-router/src/spec/schema.rs
    +++ b/apollo-router/src/spec/schema.rs
    @@ -91,7 +91,6 @@ impl Schema {
                 let errors = ValidationErrors {
                     errors: diagnostics.clone(),
                 };
    -            errors.print();
     
                 // Only error out if new validation is used: with `Both`, we take the legacy
                 // validation as authoritative and only use the new result for comparison
    
    From db86ab6299080a7c56687d9e412a099e57ca704e Mon Sep 17 00:00:00 2001
    From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
    Date: Fri, 8 Sep 2023 19:08:33 +0000
    Subject: [PATCH 43/50] fix(deps): update dependency dd-trace to v4.15.0
    
    ---
     dockerfiles/tracing/datadog-subgraph/package-lock.json | 6 +++---
     1 file changed, 3 insertions(+), 3 deletions(-)
    
    diff --git a/dockerfiles/tracing/datadog-subgraph/package-lock.json b/dockerfiles/tracing/datadog-subgraph/package-lock.json
    index da582feb1e..cfa64b06c1 100644
    --- a/dockerfiles/tracing/datadog-subgraph/package-lock.json
    +++ b/dockerfiles/tracing/datadog-subgraph/package-lock.json
    @@ -868,9 +868,9 @@
           "integrity": "sha512-/RC5F4l1SCqD/jazwUF6+t34Cd8zTSAGZ7rvvZu1whZUhD2a5MOGKjSGowoGcpj/cbVZk1ZODIooJEQQq3nNAA=="
         },
         "node_modules/dd-trace": {
    -      "version": "4.14.0",
    -      "resolved": "https://registry.npmjs.org/dd-trace/-/dd-trace-4.14.0.tgz",
    -      "integrity": "sha512-hxYWynK44VWz5FjQTG9QmQwpb9WkgBQ1QMfan9VfXt04N1H9slvFBteh3rKOUwZk61mHmqorWC0uYz4ipcQUig==",
    +      "version": "4.15.0",
    +      "resolved": "https://registry.npmjs.org/dd-trace/-/dd-trace-4.15.0.tgz",
    +      "integrity": "sha512-LesEnL2X1qqvwSCCxkSm/qRkuFUpmamf/BloGcz3B72og2qdNel8W8uviwNO9b7OGC3Fm+QzdfqwfTkoIMUZwQ==",
           "hasInstallScript": true,
           "dependencies": {
             "@datadog/native-appsec": "^3.2.0",
    
    From 8f2f059f2af78600b28c74a424326e4056a5c140 Mon Sep 17 00:00:00 2001
    From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
    Date: Mon, 11 Sep 2023 06:52:49 +0000
    Subject: [PATCH 44/50] chore(deps): update all non-major packages >= 1.0
    
    ---
     Cargo.lock               | 11 ++++++-----
     apollo-router/Cargo.toml |  6 +++---
     2 files changed, 9 insertions(+), 8 deletions(-)
    
    diff --git a/Cargo.lock b/Cargo.lock
    index f203e3022b..2ea7eb152b 100644
    --- a/Cargo.lock
    +++ b/Cargo.lock
    @@ -5051,14 +5051,15 @@ dependencies = [
     
     [[package]]
     name = "rhai"
    -version = "1.15.1"
    +version = "1.16.1"
     source = "registry+https://github.com/rust-lang/crates.io-index"
    -checksum = "4c2a11a646ef5d4e4a9d5cf80c7e4ecb20f9b1954292d5c5e6d6cbc8d33728ec"
    +checksum = "637a4f79f65571b1fd1a0ebbae05bbbf58a01faf612abbc3eea15cda34f0b87a"
     dependencies = [
      "ahash",
    - "bitflags 1.3.2",
    + "bitflags 2.4.0",
      "instant",
      "num-traits",
    + "once_cell",
      "rhai_codegen",
      "serde",
      "smallvec",
    @@ -5579,9 +5580,9 @@ dependencies = [
     
     [[package]]
     name = "serde_json"
    -version = "1.0.105"
    +version = "1.0.106"
     source = "registry+https://github.com/rust-lang/crates.io-index"
    -checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360"
    +checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2"
     dependencies = [
      "indexmap 2.0.0",
      "itoa",
    diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml
    index 50c7235249..6e9eab0e03 100644
    --- a/apollo-router/Cargo.toml
    +++ b/apollo-router/Cargo.toml
    @@ -164,7 +164,7 @@ prost-types = "0.11.9"
     proteus = "0.5.0"
     rand = "0.8.5"
     rand_core = "0.6.4"
    -rhai = { version = "1.15.1", features = ["sync", "serde", "internals"] }
    +rhai = { version = "1.16.1", features = ["sync", "serde", "internals"] }
     regex = "1.9.5"
     reqwest = { version = "0.11.19", default-features = false, features = [
         "rustls-tls",
    @@ -182,7 +182,7 @@ shellexpand = "3.1.0"
     sha2 = "0.10.7"
     serde = { version = "1.0.188", features = ["derive", "rc"] }
     serde_json_bytes = { version = "0.2.1", features = ["preserve_order"] }
    -serde_json = { version = "1.0.105", features = [
    +serde_json = { version = "1.0.106", features = [
         "preserve_order",
         "float_roundtrip",
     ] }
    @@ -275,7 +275,7 @@ reqwest = { version = "0.11.19", default-features = false, features = [
         "json",
         "stream",
     ] }
    -rhai = { version = "1.15.1", features = [
    +rhai = { version = "1.16.1", features = [
         "sync",
         "serde",
         "internals",
    
    From 60152517353559ad180b0137a20ab0e315eaed60 Mon Sep 17 00:00:00 2001
    From: Bryn Cooke 
    Date: Mon, 11 Sep 2023 09:39:42 +0100
    Subject: [PATCH 45/50] Fix metrics typos (#3798)
    
    Fix typos:
    * timout->timeout
    * filtered->authorization.filtered
    * needs_authenticated->authorization.authenticated
    * needs_requires_scopes->authorization.requires_scopes
    
    
    
    
    **Checklist**
    
    Complete the checklist (and note appropriate exceptions) before a final
    PR is raised.
    
    - [ ] Changes are compatible[^1]
    - [ ] Documentation[^2] completed
    - [ ] Performance impact assessed and acceptable
    - Tests added and passing[^3]
        - [ ] Unit Tests
        - [ ] Integration Tests
        - [ ] Manual Tests
    
    **Exceptions**
    
    *Note any exceptions here*
    
    **Notes**
    
    [^1]. It may be appropriate to bring upcoming changes to the attention
    of other (impacted) groups. Please endeavour to do this before seeking
    PR approval. The mechanism for doing this will vary considerably, so use
    your judgement as to how and when to do this.
    [^2]. Configuration is an important part of many changes. Where
    applicable please try to document configuration examples.
    [^3]. Tick whichever testing boxes are applicable. If you are adding
    Manual Tests:
    - please document the manual testing (extensively) in the Exceptions.
    - please raise a separate issue to automate the test and label it (or
    ask for it to be labeled) as `manual test`
    
    Co-authored-by: bryn 
    ---
     apollo-router/src/configuration/metrics.rs                  | 2 +-
     ...ration__metrics__test__metrics@entities.router.yaml.snap | 2 +-
     ..._metrics__test__metrics@traffic_shaping.router.yaml.snap | 2 +-
     apollo-router/src/plugins/authorization/mod.rs              | 6 +++---
     4 files changed, 6 insertions(+), 6 deletions(-)
    
    diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs
    index eb459b92e9..1bd5db048a 100644
    --- a/apollo-router/src/configuration/metrics.rs
    +++ b/apollo-router/src/configuration/metrics.rs
    @@ -263,7 +263,7 @@ impl Metrics {
             log_usage_metrics!(
                 value.apollo.router.config.traffic_shaping,
                 "$.traffic_shaping",
    -            opt.router.timout,
    +            opt.router.timeout,
                 "$$[?(@.router.timeout)]",
                 opt.router.rate_limit,
                 "$.router.global_rate_limit",
    diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap
    index e4fe10d957..dc5ccc2c68 100644
    --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap
    +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap
    @@ -8,7 +8,7 @@ value.apollo.router.config.entities:
     value.apollo.router.config.traffic_shaping:
       - 1
       - opt__router__rate_limit__: false
    -    opt__router__timout__: false
    +    opt__router__timeout__: false
         opt__subgraph__compression__: false
         opt__subgraph__deduplicate_query__: false
         opt__subgraph__http2__: false
    diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap
    index ab53cd0460..87a594c8fa 100644
    --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap
    +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap
    @@ -5,7 +5,7 @@ expression: "&metrics.metrics"
     value.apollo.router.config.traffic_shaping:
       - 1
       - opt__router__rate_limit__: true
    -    opt__router__timout__: true
    +    opt__router__timeout__: true
         opt__subgraph__compression__: true
         opt__subgraph__deduplicate_query__: true
         opt__subgraph__http2__: true
    diff --git a/apollo-router/src/plugins/authorization/mod.rs b/apollo-router/src/plugins/authorization/mod.rs
    index 7bc42dd114..489f02a4c7 100644
    --- a/apollo-router/src/plugins/authorization/mod.rs
    +++ b/apollo-router/src/plugins/authorization/mod.rs
    @@ -482,9 +482,9 @@ impl Plugin for AuthorizationPlugin {
                     if needs_authenticated || needs_requires_scopes {
                         tracing::info!(
                             monotonic_counter.apollo.router.operations.authorization = 1u64,
    -                        filtered = filtered,
    -                        authenticated = needs_authenticated,
    -                        requires_scopes = needs_requires_scopes,
    +                        authorization.filtered = filtered,
    +                        authorization.needs_authenticated = needs_authenticated,
    +                        authorization.needs_requires_scopes = needs_requires_scopes,
                         );
                     }
     
    
    From 3806cffcac27e7b068a6e4ab04faa3a978fa099d Mon Sep 17 00:00:00 2001
    From: Jeremy Lempereur 
    Date: Mon, 11 Sep 2023 14:13:22 +0200
    Subject: [PATCH 46/50] Subgraph authentication: Make sure Request signing
     happens after Compression and APQ (#3735)
    
    Fix #3608
    
     The router now adds SigningParams to the private context, which the
     subgraph service can use to sign HTTP calls just before the fetch is
     made (for both websocket connections and regular HTTP calls).
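
     For reference, subgraph request signing is enabled through the `authentication.subgraph` section of the router configuration. The snippet below is only a sketch based on the subgraph authentication docs; credential values are placeholders and should be adapted to your setup:

     ```yaml
     # Illustrative sketch; credentials are placeholders.
     authentication:
       subgraph:
         all:                      # apply to every subgraph (per-subgraph overrides also exist)
           aws_sig_v4:
             hardcoded:            # static credentials; a default credentials chain is also supported
               access_key_id: "${env.AWS_ACCESS_KEY_ID}"
               secret_access_key: "${env.AWS_SECRET_ACCESS_KEY}"
               region: "us-east-1"
               service_name: "lambda"
     ```

     With this change, the router signs the request just before the subgraph fetch, after subgraph-level features such as APQ and compression have run, so those features no longer invalidate the signature.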
    ---
     .changesets/fix_igni_sigv4_http_level.md      |  12 +
     .../src/plugins/authentication/mod.rs         |   2 +-
     .../src/plugins/authentication/subgraph.rs    | 233 ++++++++++++------
     .../src/services/subgraph_service.rs          |  60 ++++-
     docs/source/configuration/authn-subgraph.mdx  |   6 +
     5 files changed, 231 insertions(+), 82 deletions(-)
     create mode 100644 .changesets/fix_igni_sigv4_http_level.md
    
    diff --git a/.changesets/fix_igni_sigv4_http_level.md b/.changesets/fix_igni_sigv4_http_level.md
    new file mode 100644
    index 0000000000..5679499772
    --- /dev/null
    +++ b/.changesets/fix_igni_sigv4_http_level.md
    @@ -0,0 +1,12 @@
    +### Subgraph authentication: Make sure Request signing happens after Compression and APQ ([Issue #3608](https://github.com/apollographql/router/issues/3608))
    +
    +[Subgraph authentication](https://www.apollographql.com/docs/router/configuration/authn-subgraph) is available since router v1.27.0.
    +
     +Unfortunately, this first version didn't work well with features that operate on the SubgraphService, for example:
    +  - Subgraph APQ
    +  - Subgraph HTTP compression
     +  - Custom plugins that operate at the subgraph level, whether written as coprocessors, in Rhai, or natively.
    +
    +The router will now sign subgraph requests just before they are sent to subgraphs.
    +
    +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3735
    diff --git a/apollo-router/src/plugins/authentication/mod.rs b/apollo-router/src/plugins/authentication/mod.rs
    index b90c814b7a..2c8d81fefc 100644
    --- a/apollo-router/src/plugins/authentication/mod.rs
    +++ b/apollo-router/src/plugins/authentication/mod.rs
    @@ -44,7 +44,7 @@ use crate::services::router;
     use crate::Context;
     
     mod jwks;
    -mod subgraph;
    +pub(crate) mod subgraph;
     
     #[cfg(test)]
     mod tests;
    diff --git a/apollo-router/src/plugins/authentication/subgraph.rs b/apollo-router/src/plugins/authentication/subgraph.rs
    index 7839d1628d..0ad2abb7f5 100644
    --- a/apollo-router/src/plugins/authentication/subgraph.rs
    +++ b/apollo-router/src/plugins/authentication/subgraph.rs
    @@ -1,4 +1,3 @@
    -use core::ops::ControlFlow;
     use std::collections::HashMap;
     use std::sync::Arc;
     use std::time::SystemTime;
    @@ -11,14 +10,16 @@ use aws_sigv4::http_request::PayloadChecksumKind;
     use aws_sigv4::http_request::SignableBody;
     use aws_sigv4::http_request::SignableRequest;
     use aws_sigv4::http_request::SigningSettings;
    +use aws_sigv4::signing_params;
     use aws_types::region::Region;
    +use http::Request;
    +use hyper::Body;
     use schemars::JsonSchema;
     use serde::Deserialize;
     use tower::BoxError;
     use tower::ServiceBuilder;
     use tower::ServiceExt;
     
    -use crate::layers::ServiceBuilderExt;
     use crate::services::SubgraphRequest;
     
     /// Hardcoded Config using access_key and secret.
    @@ -193,6 +194,114 @@ pub(crate) struct SigningParamsConfig {
         credentials_provider: Arc,
         region: Region,
         service_name: String,
    +    subgraph_name: String,
    +}
    +
    +impl SigningParamsConfig {
    +    pub(crate) async fn sign(
    +        self,
     +        mut req: Request<Body>,
    +        subgraph_name: &str,
     +    ) -> Result<Request<Body>, BoxError> {
    +        let credentials = self.credentials().await?;
    +        let builder = self.signing_params_builder(&credentials).await?;
    +        let (parts, body) = req.into_parts();
     +        // Depending on the service, AWS refuses sigv4 payloads that contain specific headers.
    +        // We'll go with default signed headers
    +        let headers = Default::default();
    +        // UnsignedPayload only applies to lattice
    +        let body_bytes = hyper::body::to_bytes(body).await?.to_vec();
    +        let signable_request = SignableRequest::new(
    +            &parts.method,
    +            &parts.uri,
    +            &headers,
    +            match self.service_name.as_str() {
    +                "vpc-lattice-svcs" => SignableBody::UnsignedPayload,
    +                _ => SignableBody::Bytes(body_bytes.as_slice()),
    +            },
    +        );
    +
    +        let signing_params = builder.build().expect("all required fields set");
    +
    +        let (signing_instructions, _signature) = sign(signable_request, &signing_params)
    +            .map_err(|err| {
    +                increment_failure_counter(subgraph_name);
    +                let error = format!("failed to sign GraphQL body for AWS SigV4: {}", err);
    +                tracing::error!("{}", error);
    +                error
    +            })?
    +            .into_parts();
     +        req = Request::<Body>::from_parts(parts, body_bytes.into());
    +        signing_instructions.apply_to_request(&mut req);
    +        increment_success_counter(subgraph_name);
    +        Ok(req)
    +    }
     +    // Same as `sign` above, but as a separate function because `()` doesn't implement `HttpBody`.
    +    pub(crate) async fn sign_empty(
    +        self,
    +        mut req: Request<()>,
    +        subgraph_name: &str,
     +    ) -> Result<Request<()>, BoxError> {
    +        let credentials = self.credentials().await?;
    +        let builder = self.signing_params_builder(&credentials).await?;
    +        let (parts, _) = req.into_parts();
     +        // Depending on the service, AWS refuses sigv4 payloads that contain specific headers.
    +        // We'll go with default signed headers
    +        let headers = Default::default();
    +        // UnsignedPayload only applies to lattice
    +        let signable_request = SignableRequest::new(
    +            &parts.method,
    +            &parts.uri,
    +            &headers,
    +            match self.service_name.as_str() {
    +                "vpc-lattice-svcs" => SignableBody::UnsignedPayload,
    +                _ => SignableBody::Bytes(&[]),
    +            },
    +        );
    +
    +        let signing_params = builder.build().expect("all required fields set");
    +
    +        let (signing_instructions, _signature) = sign(signable_request, &signing_params)
    +            .map_err(|err| {
    +                increment_failure_counter(subgraph_name);
    +                let error = format!("failed to sign GraphQL body for AWS SigV4: {}", err);
    +                tracing::error!("{}", error);
    +                error
    +            })?
    +            .into_parts();
    +        req = Request::<()>::from_parts(parts, ());
    +        signing_instructions.apply_to_request(&mut req);
    +        increment_success_counter(subgraph_name);
    +        Ok(req)
    +    }
    +
    +    async fn signing_params_builder<'s>(
    +        &'s self,
    +        credentials: &'s Credentials,
    +    ) -> Result, BoxError> {
    +        let settings = get_signing_settings(self);
    +        let mut builder = http_request::SigningParams::builder()
    +            .access_key(credentials.access_key_id())
    +            .secret_key(credentials.secret_access_key())
    +            .region(self.region.as_ref())
    +            .service_name(&self.service_name)
    +            .time(SystemTime::now())
    +            .settings(settings);
    +        builder.set_security_token(credentials.session_token());
    +        Ok(builder)
    +    }
    +
     +    async fn credentials(&self) -> Result<Credentials, BoxError> {
    +        self.credentials_provider
    +            .provide_credentials()
    +            .await
    +            .map_err(|err| {
    +                increment_failure_counter(self.subgraph_name.as_str());
    +                let error = format!("failed to get credentials for AWS SigV4 signing: {}", err);
    +                tracing::error!("{}", error);
    +                error.into()
    +            })
    +    }
     }
     
     fn increment_success_counter(subgraph_name: &str) {
    @@ -234,6 +343,7 @@ pub(super) async fn make_signing_params(
                     region: config.region(),
                     service_name: config.service_name(),
                     credentials_provider,
    +                subgraph_name: subgraph_name.to_string(),
                 })
             }
         }
    @@ -244,7 +354,7 @@ pub(super) async fn make_signing_params(
     fn get_signing_settings(signing_params: &SigningParamsConfig) -> SigningSettings {
         let mut settings = SigningSettings::default();
         settings.payload_checksum_kind = match signing_params.service_name.as_str() {
    -        "s3" | "vpc-lattice-svcs" => PayloadChecksumKind::XAmzSha256,
    +        "appsync" | "s3" | "vpc-lattice-svcs" => PayloadChecksumKind::XAmzSha256,
             _ => PayloadChecksumKind::NoHeader,
         };
         settings
    @@ -261,77 +371,12 @@ impl SubgraphAuth {
             service: crate::services::subgraph::BoxService,
         ) -> crate::services::subgraph::BoxService {
             if let Some(signing_params) = self.params_for_service(name) {
    -            let name = name.to_string();
                 ServiceBuilder::new()
    -                .checkpoint_async(move |mut req: SubgraphRequest| {
    +                .map_request(move |req: SubgraphRequest| {
                         let signing_params = signing_params.clone();
    -                    let name = name.clone();
    -                    async move {
    -                        let credentials = signing_params
    -                            .credentials_provider
    -                            .provide_credentials()
    -                            .await
    -                            .map_err(|err| {
    -                                increment_failure_counter(name.as_str());
    -                                let error = format!(
    -                                    "failed to get credentials for AWS SigV4 signing: {}",
    -                                    err
    -                                );
    -                                tracing::error!("{}", error);
    -                                error
    -                            })?;
    -
    -                        let settings = get_signing_settings(&signing_params);
    -                        let mut builder = http_request::SigningParams::builder()
    -                            .access_key(credentials.access_key_id())
    -                            .secret_key(credentials.secret_access_key())
    -                            .region(signing_params.region.as_ref())
    -                            .service_name(&signing_params.service_name)
    -                            .time(SystemTime::now())
    -                            .settings(settings);
    -                        builder.set_security_token(credentials.session_token());
    -                        let body_bytes =
    -                            serde_json::to_vec(&req.subgraph_request.body()).map_err(|err| {
    -                                increment_failure_counter(name.as_str());
    -                                let error = format!(
    -                                    "failed to serialize GraphQL body for AWS SigV4 signing: {}",
    -                                    err
    -                                );
    -                                tracing::error!("{}", error);
    -                                error
    -                            })?;
    -
    -                        // UnsignedPayload only applies to lattice
    -                        let signable_request = SignableRequest::new(
    -                            req.subgraph_request.method(),
    -                            req.subgraph_request.uri(),
    -                            req.subgraph_request.headers(),
    -                            match signing_params.service_name.as_str() {
    -                                "vpc-lattice-svcs" => SignableBody::UnsignedPayload,
    -                                _ => SignableBody::Bytes(&body_bytes),
    -                            },
    -                        );
    -
    -                        let signing_params = builder.build().expect("all required fields set");
    -
    -                        let (signing_instructions, _signature) =
    -                            sign(signable_request, &signing_params)
    -                                .map_err(|err| {
    -                                    increment_failure_counter(name.as_str());
    -                                    let error = format!(
    -                                        "failed to sign GraphQL body for AWS SigV4: {}",
    -                                        err
    -                                    );
    -                                    tracing::error!("{}", error);
    -                                    error
    -                                })?
    -                                .into_parts();
    -                        signing_instructions.apply_to_request(&mut req.subgraph_request);
    -                        increment_success_counter(name.as_str());
    -                        Ok(ControlFlow::Continue(req))
    -                    }
    +                    req.context.private_entries.lock().insert(signing_params);
    +                    req
                     })
    -                .buffered()
                     .service(service)
                     .boxed()
             } else {
    @@ -396,6 +441,10 @@ mod test {
                     .await
                     .payload_checksum_kind
             );
    +        assert_eq!(
    +            PayloadChecksumKind::XAmzSha256,
    +            test_signing_settings("appsync").await.payload_checksum_kind
    +        );
             assert_eq!(
                 PayloadChecksumKind::NoHeader,
                 test_signing_settings("something-else")
    @@ -464,10 +513,10 @@ mod test {
             mock.expect_call()
                 .times(1)
                 .withf(|request| {
    +                let http_request = get_signed_request(request, "products".to_string());
                     assert_eq!(
                         "UNSIGNED-PAYLOAD",
    -                    request
    -                        .subgraph_request
    +                    http_request
                             .headers()
                             .get("x-amz-content-sha256")
                             .unwrap()
    @@ -509,21 +558,22 @@ mod test {
             mock.expect_call()
                 .times(1)
                 .withf(|request| {
    -                let authorization_regex = Regex::new(r"AWS4-HMAC-SHA256 Credential=id/\d{8}/us-east-1/s3/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-content-sha256;x-amz-date, Signature=[a-f0-9]{64}").unwrap();
    -                let authorization_header_str = request.subgraph_request.headers().get("authorization").unwrap().to_str().unwrap();
    +                let http_request = get_signed_request(request, "products".to_string());
    +                let authorization_regex = Regex::new(r"AWS4-HMAC-SHA256 Credential=id/\d{8}/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=[a-f0-9]{64}").unwrap();
    +                let authorization_header_str = http_request.headers().get("authorization").unwrap().to_str().unwrap();
                     assert_eq!(match authorization_regex.find(authorization_header_str) {
                         Some(m) => m.as_str(),
                         None => "no match"
                     }, authorization_header_str);
     
                     let x_amz_date_regex = Regex::new(r"\d{8}T\d{6}Z").unwrap();
    -                let x_amz_date_header_str = request.subgraph_request.headers().get("x-amz-date").unwrap().to_str().unwrap();
    +                let x_amz_date_header_str = http_request.headers().get("x-amz-date").unwrap().to_str().unwrap();
                     assert_eq!(match x_amz_date_regex.find(x_amz_date_header_str) {
                         Some(m) => m.as_str(),
                         None => "no match"
                     }, x_amz_date_header_str);
     
    -                assert_eq!(request.subgraph_request.headers().get("x-amz-content-sha256").unwrap(), "255959b4c6e11c1080f61ce0d75eb1b565c1772173335a7828ba9c13c25c0d8c");
    +                assert_eq!(http_request.headers().get("x-amz-content-sha256").unwrap(), "255959b4c6e11c1080f61ce0d75eb1b565c1772173335a7828ba9c13c25c0d8c");
     
                     true
                 })
    @@ -579,6 +629,7 @@ mod test {
                         .header(HOST, "rhost")
                         .header(CONTENT_LENGTH, "22")
                         .header(CONTENT_TYPE, "graphql")
    +                    .uri("https://test-endpoint.com")
                         .body(Request::builder().query("query").build())
                         .expect("expecting valid request"),
                 )
    @@ -586,4 +637,32 @@ mod test {
                 .context(Context::new())
                 .build()
         }
    +
    +    fn get_signed_request(
    +        request: &SubgraphRequest,
    +        service_name: String,
     +    ) -> hyper::Request<Body> {
    +        let signing_params = {
    +            let ctx = request.context.private_entries.lock();
     +            let sp = ctx.get::<SigningParamsConfig>();
    +            sp.cloned().unwrap()
    +        };
    +
    +        let http_request = request
    +            .clone()
    +            .subgraph_request
    +            .map(|body| hyper::Body::from(serde_json::to_string(&body).unwrap()));
    +
    +        std::thread::spawn(move || {
    +            let rt = tokio::runtime::Runtime::new().unwrap();
    +            rt.block_on(async {
    +                signing_params
    +                    .sign(http_request, service_name.as_str())
    +                    .await
    +                    .unwrap()
    +            })
    +        })
    +        .join()
    +        .unwrap()
    +    }
     }
    diff --git a/apollo-router/src/services/subgraph_service.rs b/apollo-router/src/services/subgraph_service.rs
    index c99e482786..8ba73b4068 100644
    --- a/apollo-router/src/services/subgraph_service.rs
    +++ b/apollo-router/src/services/subgraph_service.rs
    @@ -60,6 +60,7 @@ use super::Plugins;
     use crate::error::FetchError;
     use crate::graphql;
     use crate::json_ext::Object;
    +use crate::plugins::authentication::subgraph::SigningParamsConfig;
     use crate::plugins::subscription::create_verifier;
     use crate::plugins::subscription::CallbackMode;
     use crate::plugins::subscription::SubscriptionConfig;
    @@ -488,11 +489,28 @@ async fn call_websocket(
         };
     
         let request = get_websocket_request(service_name.clone(), parts, subgraph_cfg)?;
    +
         let display_headers = context.contains_key(LOGGING_DISPLAY_HEADERS);
         let display_body = context.contains_key(LOGGING_DISPLAY_BODY);
    +
    +    let signing_params = context
    +        .private_entries
    +        .lock()
     +        .get::<SigningParamsConfig>()
    +        .cloned();
    +
    +    let request = if let Some(signing_params) = signing_params {
    +        signing_params
    +            .sign_empty(request, service_name.as_str())
    +            .await?
    +    } else {
    +        request
    +    };
    +
         if display_headers {
             tracing::info!(http.request.headers = ?request.headers(), apollo.subgraph.name = %service_name, "Websocket request headers to subgraph {service_name:?}");
         }
    +
         if display_body {
             tracing::info!(http.request.body = ?request.body(), apollo.subgraph.name = %service_name, "Websocket request body to subgraph {service_name:?}");
         }
    @@ -530,14 +548,24 @@ async fn call_websocket(
             }
             _ => connect_async(request).instrument(subgraph_req_span).await,
         }
    -    .map_err(|err| FetchError::SubrequestWsError {
    -        service: service_name.clone(),
    -        reason: format!("cannot connect websocket to subgraph: {err}"),
    +    .map_err(|err| {
    +        if display_body || display_headers {
    +            tracing::info!(
    +                http.response.error = format!("{:?}", &err), apollo.subgraph.name = %service_name, "Websocket connection error from subgraph {service_name:?} received"
    +            );
    +        }
    +        FetchError::SubrequestWsError {
    +            service: service_name.clone(),
    +            reason: format!("cannot connect websocket to subgraph: {err}"),
    +        }
         })?;
     
    +    if display_headers {
     +        tracing::info!(response.headers = ?resp.headers(), apollo.subgraph.name = %service_name, "Websocket response headers from subgraph {service_name:?}");
    +    }
         if display_body {
             tracing::info!(
    -            response.body = %String::from_utf8_lossy(&resp.body_mut().take().unwrap_or_default()), apollo.subgraph.name = %service_name, "Raw response body from subgraph {service_name:?} received"
    +            response.body = %String::from_utf8_lossy(&resp.body_mut().take().unwrap_or_default()), apollo.subgraph.name = %service_name, "Websocket response body from subgraph {service_name:?} received"
             );
         }
     
    @@ -674,6 +702,18 @@ async fn call_http(
         let display_headers = context.contains_key(LOGGING_DISPLAY_HEADERS);
         let display_body = context.contains_key(LOGGING_DISPLAY_BODY);
     
    +    let signing_params = context
    +        .private_entries
    +        .lock()
     +        .get::<SigningParamsConfig>()
    +        .cloned();
    +
    +    let request = if let Some(signing_params) = signing_params {
    +        signing_params.sign(request, service_name).await?
    +    } else {
    +        request
    +    };
    +
         // Print out the debug for the request
         if display_headers {
             tracing::info!(http.request.headers = ?request.headers(), apollo.subgraph.name = %service_name, "Request headers to subgraph {service_name:?}");
    @@ -694,6 +734,18 @@ async fn call_http(
         .instrument(subgraph_req_span)
         .await?;
     
    +    // Print out the debug for the response
    +    if display_headers {
    +        tracing::info!(response.headers = ?parts.headers, apollo.subgraph.name = %service_name, "Response headers from subgraph {service_name:?}");
    +    }
    +    if display_body {
    +        if let Some(Ok(b)) = &body {
    +            tracing::info!(
    +                response.body = %String::from_utf8_lossy(b), apollo.subgraph.name = %service_name, "Raw response body from subgraph {service_name:?} received"
    +            );
    +        }
    +    }
    +
         let mut graphql_response = match (content_type, body, parts.status.is_success()) {
             (Ok(ContentType::ApplicationGraphqlResponseJson), Some(Ok(body)), _)
             | (Ok(ContentType::ApplicationJson), Some(Ok(body)), true) => {
    diff --git a/docs/source/configuration/authn-subgraph.mdx b/docs/source/configuration/authn-subgraph.mdx
    index 63a1f98c85..771af8ac1e 100644
    --- a/docs/source/configuration/authn-subgraph.mdx
    +++ b/docs/source/configuration/authn-subgraph.mdx
    @@ -7,6 +7,12 @@ The Apollo Router supports subgraph request authentication and key rotation via
     
     This allows you to secure communication to AWS subgraphs by making sure a subgraph request was made by the Apollo Router, and the payload hasn't been tampered with.
     
    +We have tested the feature against the following services:
    +  - AWS Lambda URL
     +  - AWS AppSync
     +  - Amazon API Gateway
     +  - VPC Lattice ⚠️ VPC Lattice doesn't support websockets, so you won't be able to use subscriptions in passthrough mode.
    +
     **To use this feature:**
     
     To use this feature, your AWS hosted subgraphs must be configured with IAM to accept [signed requests](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html).
    
    From 5b9c2161353cc91fa8de62446d3e938bc6765666 Mon Sep 17 00:00:00 2001
    From: Jeremy Lempereur 
    Date: Mon, 11 Sep 2023 14:13:39 +0200
    Subject: [PATCH 47/50] Fix: deal with interface inheritance when retrieving
     selectionset (#3793)
    
     Follow-up to #3718: this changeset makes sure we're able to generate the
     most concrete selection set for a given operation.
    
    This means finding the most concrete type we can when we're dealing with
    interfaces:
    - If InterfaceA implements InterfaceB, use InterfaceA as current_type to
    generate an inline fragment's selection set
    
    Given the following invariants:
    ```graphql
      interface OperationItemStuff implements OperationItem
    ```
    
    For
    ```graphql
    fragment OperationItemFragment on OperationItem {
      ... on OperationItemStuff {
         stuff
      }
    }
    ```
    
     The most concrete type for generating the `OperationItemStuff` fields is
     `OperationItemStuff` itself, not `OperationItem`, so we narrow the
     selection down to `OperationItemStuff`.
    
     The fixes for #3718 still apply, i.e.:
    
    Given the following invariants:
    ```graphql
      type Dog implements Animal
    ```
    For
    ```graphql
    ...on Animal {
      id
      ...on Dog {
        name
      }
    }
    ```
     The most concrete type for generating the `Dog` selection set is `Dog`
     itself, not `Animal`, so we narrow the selection down to `Dog`.
    ---
     .../src/services/supergraph_service.rs        | 203 ++++++++++++++++++
     apollo-router/src/spec/schema.rs              |  38 ++++
     apollo-router/src/spec/selection.rs           |   9 +-
     3 files changed, 248 insertions(+), 2 deletions(-)
    
    diff --git a/apollo-router/src/services/supergraph_service.rs b/apollo-router/src/services/supergraph_service.rs
    index 9f14a693a0..198b29b8be 100644
    --- a/apollo-router/src/services/supergraph_service.rs
    +++ b/apollo-router/src/services/supergraph_service.rs
    @@ -3103,4 +3103,207 @@ mod tests {
             );
             insta::assert_json_snapshot!(with_reversed_fragments);
         }
    +
    +    #[tokio::test]
    +    async fn multiple_interface_types() {
    +        let schema = r#"
    +      schema
    +        @link(url: "https://specs.apollo.dev/link/v1.0")
    +        @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) {
    +        query: Query
    +      }
    +
    +      directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE
    +
    +      directive @join__field(
    +        graph: join__Graph
    +        requires: join__FieldSet
    +        provides: join__FieldSet
    +        type: String
    +        external: Boolean
    +        override: String
    +        usedOverridden: Boolean
    +      ) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION
    +
    +      directive @join__graph(name: String!, url: String!) on ENUM_VALUE
    +
    +      directive @join__implements(
    +        graph: join__Graph!
    +        interface: String!
    +      ) repeatable on OBJECT | INTERFACE
    +
    +      directive @join__type(
    +        graph: join__Graph!
    +        key: join__FieldSet
    +        extension: Boolean! = false
    +        resolvable: Boolean! = true
    +        isInterfaceObject: Boolean! = false
    +      ) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR
    +
    +      directive @join__unionMember(
    +        graph: join__Graph!
    +        member: String!
    +      ) repeatable on UNION
    +
    +      directive @link(
    +        url: String
    +        as: String
    +        for: link__Purpose
    +        import: [link__Import]
    +      ) repeatable on SCHEMA
    +
    +      directive @tag(
    +        name: String!
    +      ) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA
    +
    +      enum link__Purpose {
    +        EXECUTION
    +        SECURITY
    +      }
    +
    +      scalar join__FieldSet
    +      scalar link__Import
    +
    +      enum join__Graph {
    +        GRAPH1 @join__graph(name: "graph1", url: "http://localhost:8080/graph1")
    +      }
    +
    +      type Query @join__type(graph: GRAPH1) {
    +        root(id: ID!): Root @join__field(graph: GRAPH1)
    +      }
    +
    +      type Root @join__type(graph: GRAPH1, key: "id") {
    +        id: ID!
    +        operation(a: Int, b: Int): OperationResult!
    +      }
    +
    +      union OperationResult
    +        @join__type(graph: GRAPH1)
    +        @join__unionMember(graph: GRAPH1, member: "Operation") =
    +          Operation
    +
    +      type Operation @join__type(graph: GRAPH1) {
    +        id: ID!
    +        item: [OperationItem!]!
    +      }
    +
    +      interface OperationItem @join__type(graph: GRAPH1) {
    +        type: OperationType!
    +      }
    +
    +      enum OperationType @join__type(graph: GRAPH1) {
    +        ADD_ARGUMENT @join__enumValue(graph: GRAPH1)
    +      }
    +
    +      interface OperationItemRootType implements OperationItem
    +        @join__implements(graph: GRAPH1, interface: "OperationItem")
    +        @join__type(graph: GRAPH1) {
    +        rootType: String!
    +        type: OperationType!
    +      }
    +
    +      interface OperationItemStuff implements OperationItem
    +        @join__implements(graph: GRAPH1, interface: "OperationItem")
    +        @join__type(graph: GRAPH1) {
    +        stuff: String!
    +        type: OperationType!
    +      }
    +
    +      type OperationAddArgument implements OperationItem & OperationItemStuff & OperationItemValue
    +        @join__implements(graph: GRAPH1, interface: "OperationItem")
    +        @join__implements(graph: GRAPH1, interface: "OperationItemStuff")
    +        @join__implements(graph: GRAPH1, interface: "OperationItemValue")
    +        @join__type(graph: GRAPH1) {
    +        stuff: String!
    +        type: OperationType!
    +        value: String!
    +      }
    +
    +      interface OperationItemValue implements OperationItem
    +        @join__implements(graph: GRAPH1, interface: "OperationItem")
    +        @join__type(graph: GRAPH1) {
    +        type: OperationType!
    +        value: String!
    +      }
    +
    +      type OperationRemoveSchemaRootOperation implements OperationItem & OperationItemRootType
    +        @join__implements(graph: GRAPH1, interface: "OperationItem")
    +        @join__implements(graph: GRAPH1, interface: "OperationItemRootType")
    +        @join__type(graph: GRAPH1) {
    +        rootType: String!
    +        type: OperationType!
    +      }
    +      "#;
    +
    +        let query = r#"fragment OperationItemFragment on OperationItem {
    +            __typename
    +            ... on OperationItemStuff {
    +              __typename
    +              stuff
    +            }
    +            ... on OperationItemRootType {
    +              __typename
    +              rootType
    +            }
    +          }
    +          query MyQuery($id: ID!, $a: Int, $b: Int) {
    +            root(id: $id) {
    +              __typename
    +              operation(a: $a, b: $b) {
    +                __typename
    +                ... on Operation {
    +                  __typename
    +                  item {
    +                    __typename
    +                    ...OperationItemFragment
    +                    ... on OperationItemStuff {
    +                      __typename
    +                      stuff
    +                    }
    +                    ... on OperationItemValue {
    +                      __typename
    +                      value
    +                    }
    +                  }
    +                  id
    +                }
    +              }
    +              id
    +            }
    +          }"#;
    +
    +        let subgraphs = MockedSubgraphs([
    +            // The response isn't interesting to us,
    +            // we just need to make sure the query makes it through parsing and validation
    +            ("graph1", MockSubgraph::builder().with_json(
    +                serde_json::json!{{"query":"query MyQuery__graph1__0($id:ID!$a:Int$b:Int){root(id:$id){__typename operation(a:$a b:$b){__typename ...on Operation{__typename item{__typename ...on OperationItemStuff{__typename stuff}...on OperationItemRootType{__typename rootType}...on OperationItemValue{__typename value}}id}}id}}", "operationName": "MyQuery__graph1__0", "variables":{"id":"1234","a":1,"b":2}}},
    +                serde_json::json!{{"data": null }}
    +            ).build()),
    +            ].into_iter().collect());
    +
    +        let service = TestHarness::builder()
    +            .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } }))
    +            .unwrap()
    +            .schema(schema)
    +            .extra_plugin(subgraphs)
    +            .build_supergraph()
    +            .await
    +            .unwrap();
    +
    +        let request = supergraph::Request::fake_builder()
    +            .context(defer_context())
    +            .query(query)
    +            .variables(
    +                serde_json_bytes::json! {{ "id": "1234", "a": 1, "b": 2}}
    +                    .as_object()
    +                    .unwrap()
    +                    .clone(),
    +            )
    +            .build()
    +            .unwrap();
    +
    +        let mut stream = service.clone().oneshot(request).await.unwrap();
    +        let response = stream.next_response().await.unwrap();
    +        assert_eq!(serde_json_bytes::Value::Null, response.data.unwrap());
    +    }
     }
    diff --git a/apollo-router/src/spec/schema.rs b/apollo-router/src/spec/schema.rs
    index 32e955b786..744e3d2cee 100644
    --- a/apollo-router/src/spec/schema.rs
    +++ b/apollo-router/src/spec/schema.rs
    @@ -13,6 +13,7 @@ use http::Uri;
     use sha2::Digest;
     use sha2::Sha256;
     
    +use super::FieldType;
     use crate::configuration::GraphQLValidationMode;
     use crate::error::ParseErrors;
     use crate::error::SchemaError;
    @@ -159,6 +160,19 @@ impl Schema {
                 .unwrap_or(false)
         }
     
    +    pub(crate) fn is_implementation(&self, interface: &str, implementor: &str) -> bool {
    +        self.type_system
    +            .definitions
    +            .interfaces
    +            .get(interface)
    +            .map(|interface| {
    +                interface
    +                    .implements_interfaces()
    +                    .any(|i| i.interface() == implementor)
    +            })
    +            .unwrap_or(false)
    +    }
    +
         pub(crate) fn is_interface(&self, abstract_type: &str) -> bool {
             self.type_system
                 .definitions
    @@ -166,6 +180,30 @@ impl Schema {
                 .contains_key(abstract_type)
         }
     
     +    // Given two fields, returns the one that implements the other, if applicable
    +    pub(crate) fn most_precise<'f>(
    +        &self,
    +        a: &'f FieldType,
    +        b: &'f FieldType,
    +    ) -> Option<&'f FieldType> {
    +        let typename_a = a.inner_type_name().unwrap_or_default();
    +        let typename_b = b.inner_type_name().unwrap_or_default();
    +        if typename_a == typename_b {
    +            return Some(a);
    +        }
    +        if self.is_subtype(typename_a, typename_b) || self.is_implementation(typename_a, typename_b)
    +        {
    +            Some(b)
    +        } else if self.is_subtype(typename_b, typename_a)
    +            || self.is_implementation(typename_b, typename_a)
    +        {
    +            Some(a)
    +        } else {
    +            // No relationship between a and b
    +            None
    +        }
    +    }
    +
         /// Return an iterator over subgraphs that yields the subgraph name and its URL.
         pub(crate) fn subgraphs(&self) -> impl Iterator {
             self.subgraphs.iter()
    diff --git a/apollo-router/src/spec/selection.rs b/apollo-router/src/spec/selection.rs
    index 5252a7f5d9..e1d6ee7386 100644
    --- a/apollo-router/src/spec/selection.rs
    +++ b/apollo-router/src/spec/selection.rs
    @@ -164,12 +164,17 @@ impl Selection {
                             schema.is_subtype(
                                 type_condition.as_str(),
                                 current_type.inner_type_name().unwrap_or("")
    -                        ) ||
    +                        ) || schema.is_implementation(
    +                            type_condition.as_str(),
    +                            current_type.inner_type_name().unwrap_or(""))
    +                     ||
                             // if the current type and the type condition are both the same interface, it is still valid
                             type_condition.as_str()
                                 == current_type.inner_type_name().unwrap_or("")
                         );
    -                    current_type
    +                    let relevant_type = schema.most_precise(current_type, &fragment_type);
    +                    debug_assert!(relevant_type.is_some());
    +                    relevant_type.unwrap_or(&fragment_type)
                     } else {
                         &fragment_type
                     };
    
    From 4a4b292df062df9601125efce252bc477a4cfb57 Mon Sep 17 00:00:00 2001
    From: Parker Holladay 
    Date: Mon, 11 Sep 2023 06:15:20 -0600
    Subject: [PATCH 48/50] docs: fix auth rhai example and link (#3795)
    
     - In Rhai, it is `request[.subgraph].body.extensions`; the example was
     missing the `.body` (see the sketch below)
     - Fix the link to the Rhai example for forwarding headers
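
     For illustration, a minimal Rhai sketch of the corrected pattern might look like this. The callback wiring and the `apollo_authentication::JWT::claims` context key are assumptions based on the router's Rhai and JWT docs, not part of this patch:

     ```rhai
     fn subgraph_service(service, subgraph) {
         // Assumed wiring: register a request callback on the subgraph service.
         const request_callback = Fn("process_request");
         service.map_request(request_callback);
     }

     fn process_request(request) {
         // Claims placed on the context by the router's JWT authentication plugin (assumed key).
         let claims = request.context["apollo_authentication::JWT::claims"];
         // The corrected path: extensions live on the request *body*, not on the request itself.
         request.subgraph.body.extensions["claims"] = claims;
     }
     ```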
    ---
     docs/source/configuration/authn-jwt.mdx | 8 ++++----
     1 file changed, 4 insertions(+), 4 deletions(-)
    
    diff --git a/docs/source/configuration/authn-jwt.mdx b/docs/source/configuration/authn-jwt.mdx
    index 149be31017..0edff5acde 100644
    --- a/docs/source/configuration/authn-jwt.mdx
    +++ b/docs/source/configuration/authn-jwt.mdx
    @@ -46,8 +46,8 @@ You enable JWT authentication for your router with the following steps:
           router:
             jwt:
               jwks: # This key is required.
    -          - url: https://dev-zzp5enui.us.auth0.com/.well-known/jwks.json
    -            issuer: 
    +            - url: https://dev-zzp5enui.us.auth0.com/.well-known/jwks.json
    +              issuer: 
     
               # These keys are optional. Default values are shown.
               header_name: Authorization
    @@ -204,7 +204,7 @@ fn process_request(request) {
             status: 401
           };
         }
    -    request.subgraph.extensions["claims"] = claims;
    +    request.subgraph.body.extensions["claims"] = claims;
     }
     ```
     
    @@ -650,7 +650,7 @@ This matching strategy is necessary because some identity providers (IdPs) don't
     
     ## Forwarding JWTs to subgraphs
     
    -Because the Apollo Router handles validating incoming JWTs, you rarely need to pass those JWTs to individual subgraphs in their entirety. Instead, you usually want to [pass JWT _claims_ to subgraphs](#example-forwarding-claims-to-subgraphs) to enable fine-grained access control.
    +Because the Apollo Router handles validating incoming JWTs, you rarely need to pass those JWTs to individual subgraphs in their entirety. Instead, you usually want to [pass JWT _claims_ to subgraphs](#example-forwarding-claims-to-subgraphs-as-headers) to enable fine-grained access control.
     
     If you _do_ need to pass entire JWTs to subgraphs, you can do so via the Apollo Router's general-purpose [HTTP header propagation settings](./header-propagation).
     
    
    From ce7dc99b0678a8e71ffcf96dd1e36103a4e5b597 Mon Sep 17 00:00:00 2001
    From: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com>
    Date: Mon, 11 Sep 2023 14:43:03 +0200
    Subject: [PATCH 49/50] prep release: v1.30.0-alpha.1
    
    ---
     Cargo.lock                                    |  6 +--
     apollo-router-benchmarks/Cargo.toml           |  2 +-
     apollo-router-scaffold/Cargo.toml             |  2 +-
     .../templates/base/Cargo.toml                 |  2 +-
     .../templates/base/xtask/Cargo.toml           |  2 +-
     apollo-router/Cargo.toml                      |  2 +-
     .../tracing/docker-compose.datadog.yml        |  2 +-
     dockerfiles/tracing/docker-compose.jaeger.yml |  2 +-
     dockerfiles/tracing/docker-compose.zipkin.yml |  2 +-
     docs/source/containerization/docker.mdx       |  2 +-
     docs/source/containerization/kubernetes.mdx   | 28 +++++------
     helm/chart/router/Chart.yaml                  |  4 +-
     helm/chart/router/README.md                   |  6 +--
     licenses.html                                 | 49 +------------------
     scripts/install.sh                            |  2 +-
     15 files changed, 33 insertions(+), 80 deletions(-)
    
    diff --git a/Cargo.lock b/Cargo.lock
    index afed5c2e74..0232f7b0fc 100644
    --- a/Cargo.lock
    +++ b/Cargo.lock
    @@ -263,7 +263,7 @@ dependencies = [
     
     [[package]]
     name = "apollo-router"
    -version = "1.30.0-alpha.0"
    +version = "1.30.0-alpha.1"
     dependencies = [
      "access-json",
      "anyhow",
    @@ -409,7 +409,7 @@ dependencies = [
     
     [[package]]
     name = "apollo-router-benchmarks"
    -version = "1.30.0-alpha.0"
    +version = "1.30.0-alpha.1"
     dependencies = [
      "apollo-parser 0.6.1",
      "apollo-router",
    @@ -425,7 +425,7 @@ dependencies = [
     
     [[package]]
     name = "apollo-router-scaffold"
    -version = "1.30.0-alpha.0"
    +version = "1.30.0-alpha.1"
     dependencies = [
      "anyhow",
      "cargo-scaffold",
    diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml
    index 77682a342f..9ae9234517 100644
    --- a/apollo-router-benchmarks/Cargo.toml
    +++ b/apollo-router-benchmarks/Cargo.toml
    @@ -1,6 +1,6 @@
     [package]
     name = "apollo-router-benchmarks"
    -version = "1.30.0-alpha.0"
    +version = "1.30.0-alpha.1"
     authors = ["Apollo Graph, Inc. "]
     edition = "2021"
     license = "Elastic-2.0"
    diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml
    index b0ebc8ca52..a58f350a3b 100644
    --- a/apollo-router-scaffold/Cargo.toml
    +++ b/apollo-router-scaffold/Cargo.toml
    @@ -1,6 +1,6 @@
     [package]
     name = "apollo-router-scaffold"
    -version = "1.30.0-alpha.0"
    +version = "1.30.0-alpha.1"
     authors = ["Apollo Graph, Inc. "]
     edition = "2021"
     license = "Elastic-2.0"
    diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml
    index d0f7f03120..2d6220f94c 100644
    --- a/apollo-router-scaffold/templates/base/Cargo.toml
    +++ b/apollo-router-scaffold/templates/base/Cargo.toml
    @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" }
     apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" }
     {{else}}
     # Note if you update these dependencies then also update xtask/Cargo.toml
    -apollo-router = "1.30.0-alpha.0"
    +apollo-router = "1.30.0-alpha.1"
     {{/if}}
     {{/if}}
     async-trait = "0.1.52"
    diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml
    index 2e8cab7b02..aaa1dff37b 100644
    --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml
    +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml
    @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" }
     {{#if branch}}
     apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" }
     {{else}}
    -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.30.0-alpha.0" }
    +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.30.0-alpha.1" }
     {{/if}}
     {{/if}}
     anyhow = "1.0.58"
    diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml
    index 504730d885..87ea47b7e4 100644
    --- a/apollo-router/Cargo.toml
    +++ b/apollo-router/Cargo.toml
    @@ -1,6 +1,6 @@
     [package]
     name = "apollo-router"
    -version = "1.30.0-alpha.0"
    +version = "1.30.0-alpha.1"
     authors = ["Apollo Graph, Inc. "]
     repository = "https://github.com/apollographql/router/"
     documentation = "https://docs.rs/apollo-router"
    diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml
    index d0198984eb..9355430d4b 100644
    --- a/dockerfiles/tracing/docker-compose.datadog.yml
    +++ b/dockerfiles/tracing/docker-compose.datadog.yml
    @@ -3,7 +3,7 @@ services:
     
       apollo-router:
         container_name: apollo-router
    -    image: ghcr.io/apollographql/router:v1.30.0-alpha.0
    +    image: ghcr.io/apollographql/router:v1.30.0-alpha.1
         volumes:
           - ./supergraph.graphql:/etc/config/supergraph.graphql
           - ./router/datadog.router.yaml:/etc/config/configuration.yaml
    diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml
    index d94d57403c..21b302be20 100644
    --- a/dockerfiles/tracing/docker-compose.jaeger.yml
    +++ b/dockerfiles/tracing/docker-compose.jaeger.yml
    @@ -4,7 +4,7 @@ services:
       apollo-router:
         container_name: apollo-router
         #build: ./router
    -    image: ghcr.io/apollographql/router:v1.30.0-alpha.0
    +    image: ghcr.io/apollographql/router:v1.30.0-alpha.1
         volumes:
           - ./supergraph.graphql:/etc/config/supergraph.graphql
           - ./router/jaeger.router.yaml:/etc/config/configuration.yaml
    diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml
    index 976e6a048c..6e6657c280 100644
    --- a/dockerfiles/tracing/docker-compose.zipkin.yml
    +++ b/dockerfiles/tracing/docker-compose.zipkin.yml
    @@ -4,7 +4,7 @@ services:
       apollo-router:
         container_name: apollo-router
         build: ./router
    -    image: ghcr.io/apollographql/router:v1.30.0-alpha.0
    +    image: ghcr.io/apollographql/router:v1.30.0-alpha.1
         volumes:
           - ./supergraph.graphql:/etc/config/supergraph.graphql
           - ./router/zipkin.router.yaml:/etc/config/configuration.yaml
    diff --git a/docs/source/containerization/docker.mdx b/docs/source/containerization/docker.mdx
    index 4db3611508..dcee10e6dd 100644
    --- a/docs/source/containerization/docker.mdx
    +++ b/docs/source/containerization/docker.mdx
    @@ -11,7 +11,7 @@ The default behaviour of the router images is suitable for a quickstart or devel
     
     Note: The [docker documentation](https://docs.docker.com/engine/reference/run/) for the run command may be helpful when reading through the examples.
     
    -Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.30.0-alpha.0`
    +Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.30.0-alpha.1`
     
     ## Override the configuration
     
    diff --git a/docs/source/containerization/kubernetes.mdx b/docs/source/containerization/kubernetes.mdx
    index 259f19679c..ef3774a62f 100644
    --- a/docs/source/containerization/kubernetes.mdx
    +++ b/docs/source/containerization/kubernetes.mdx
    @@ -13,7 +13,7 @@ import { Link } from 'gatsby';
     
     [Helm](https://helm.sh) is the package manager for kubernetes.
     
    -There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.30.0-alpha.0/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes.
    +There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.30.0-alpha.1/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes.
     
     In both the following examples, we are using helm to install the router:
      - into namespace "router-deploy" (create namespace if it doesn't exist)
    @@ -64,10 +64,10 @@ kind: ServiceAccount
     metadata:
       name: release-name-router
       labels:
    -    helm.sh/chart: router-1.30.0-alpha.0
    +    helm.sh/chart: router-1.30.0-alpha.1
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.30.0-alpha.0"
    +    app.kubernetes.io/version: "v1.30.0-alpha.1"
         app.kubernetes.io/managed-by: Helm
     ---
     # Source: router/templates/secret.yaml
    @@ -76,10 +76,10 @@ kind: Secret
     metadata:
       name: "release-name-router"
       labels:
    -    helm.sh/chart: router-1.30.0-alpha.0
    +    helm.sh/chart: router-1.30.0-alpha.1
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.30.0-alpha.0"
    +    app.kubernetes.io/version: "v1.30.0-alpha.1"
         app.kubernetes.io/managed-by: Helm
     data:
       managedFederationApiKey: "UkVEQUNURUQ="
    @@ -90,10 +90,10 @@ kind: ConfigMap
     metadata:
       name: release-name-router
       labels:
    -    helm.sh/chart: router-1.30.0-alpha.0
    +    helm.sh/chart: router-1.30.0-alpha.1
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.30.0-alpha.0"
    +    app.kubernetes.io/version: "v1.30.0-alpha.1"
         app.kubernetes.io/managed-by: Helm
     data:
       configuration.yaml: |
    @@ -117,10 +117,10 @@ kind: Service
     metadata:
       name: release-name-router
       labels:
    -    helm.sh/chart: router-1.30.0-alpha.0
    +    helm.sh/chart: router-1.30.0-alpha.1
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.30.0-alpha.0"
    +    app.kubernetes.io/version: "v1.30.0-alpha.1"
         app.kubernetes.io/managed-by: Helm
     spec:
       type: ClusterIP
    @@ -143,10 +143,10 @@ kind: Deployment
     metadata:
       name: release-name-router
       labels:
    -    helm.sh/chart: router-1.30.0-alpha.0
    +    helm.sh/chart: router-1.30.0-alpha.1
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.30.0-alpha.0"
    +    app.kubernetes.io/version: "v1.30.0-alpha.1"
         app.kubernetes.io/managed-by: Helm
       
       annotations:
    @@ -174,7 +174,7 @@ spec:
             - name: router
               securityContext:
                 {}
    -          image: "ghcr.io/apollographql/router:v1.30.0-alpha.0"
    +          image: "ghcr.io/apollographql/router:v1.30.0-alpha.1"
               imagePullPolicy: IfNotPresent
               args:
                 - --hot-reload
    @@ -226,10 +226,10 @@ kind: Pod
     metadata:
       name: "release-name-router-test-connection"
       labels:
    -    helm.sh/chart: router-1.30.0-alpha.0
    +    helm.sh/chart: router-1.30.0-alpha.1
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.30.0-alpha.0"
    +    app.kubernetes.io/version: "v1.30.0-alpha.1"
         app.kubernetes.io/managed-by: Helm
       annotations:
         "helm.sh/hook": test
    diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml
    index 634a6608b4..8209043e7d 100644
    --- a/helm/chart/router/Chart.yaml
    +++ b/helm/chart/router/Chart.yaml
    @@ -20,10 +20,10 @@ type: application
     # so it matches the shape of our release process and release automation.
     # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix
     # of "v" is not included.
    -version: 1.30.0-alpha.0
    +version: 1.30.0-alpha.1
     
     # This is the version number of the application being deployed. This version number should be
     # incremented each time you make changes to the application. Versions are not expected to
     # follow Semantic Versioning. They should reflect the version the application is using.
     # It is recommended to use it with quotes.
    -appVersion: "v1.30.0-alpha.0"
    \ No newline at end of file
    +appVersion: "v1.30.0-alpha.1"
    \ No newline at end of file
    diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md
    index 555a17c3c4..d93957af8d 100644
    --- a/helm/chart/router/README.md
    +++ b/helm/chart/router/README.md
    @@ -2,7 +2,7 @@
     
     [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation
     
    -![Version: 1.30.0-alpha.0](https://img.shields.io/badge/Version-1.30.0--alpha.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.30.0-alpha.0](https://img.shields.io/badge/AppVersion-v1.30.0--alpha.0-informational?style=flat-square)
    +![Version: 1.30.0-alpha.1](https://img.shields.io/badge/Version-1.30.0--alpha.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.30.0-alpha.1](https://img.shields.io/badge/AppVersion-v1.30.0--alpha.1-informational?style=flat-square)
     
     ## Prerequisites
     
    @@ -11,7 +11,7 @@
     ## Get Repo Info
     
     ```console
    -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.30.0-alpha.0
    +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.30.0-alpha.1
     ```
     
     ## Install Chart
    @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.30.0-alpha.
     **Important:** only helm3 is supported
     
     ```console
    -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.30.0-alpha.0 --values my-values.yaml
    +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.30.0-alpha.1 --values my-values.yaml
     ```
     
     _See [configuration](#configuration) below._
    diff --git a/licenses.html b/licenses.html
    index f5fdd07f61..744d6ecffe 100644
    --- a/licenses.html
    +++ b/licenses.html
    @@ -48,10 +48,10 @@
     Overview of licenses:
     * Apache License 2.0 (62)
     * BSD 3-Clause "New" or "Revised" License (10)
     * ISC License (10)
    -* Creative Commons Zero v1.0 Universal (2)
     * Elastic License 2.0 (2)
     * Mozilla Public License 2.0 (2)
     * BSD 2-Clause "Simplified" License (1)
    +* Creative Commons Zero v1.0 Universal (1)
     * OpenSSL License (1)
     * Unicode License Agreement - Data Files and Software (2016) (1)
    @@ -11778,53 +11778,6 @@
     Creative Commons Zero v1.0 Universal
     
     Used by:
    -Creative Commons CC0 1.0 Universal
    -[duplicated copy of the full CC0 1.0 Universal legal text and its "Used by" entry, removed by this hunk]
     Creative Commons Zero v1.0 Universal
     
     Used by:
     
     Creative Commons Legal Code
    diff --git a/scripts/install.sh b/scripts/install.sh
    index 235e23131f..a545a24875 100755
    --- a/scripts/install.sh
    +++ b/scripts/install.sh
    @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa
     
     # Router version defined in apollo-router's Cargo.toml
     # Note: Change this line manually during the release steps.
    -PACKAGE_VERSION="v1.30.0-alpha.0"
    +PACKAGE_VERSION="v1.30.0-alpha.1"
     
     download_binary() {
         downloader --check
    
    From 8ca7484c1a96c7f9e381c3b701cf1380bc880476 Mon Sep 17 00:00:00 2001
    From: Jesse Rosenberger 
    Date: Thu, 14 Sep 2023 17:04:12 +0300
    Subject: [PATCH 50/50] prep release: v1.30.0
    
    ---
     .changesets/docs_fix_broken_links.md          |   5 -
     .../docs_geal_authorization_router_rhai.md    |   5 -
     .../feat_garypen_2278_rhai_router_service.md  |  34 -----
     .changesets/fix_bnjjj_fix_3683.md             |   5 -
     .../fix_bnjjj_fix_empty_multipart_stream.md   |   5 -
     ...njjj_subscription_default_configuration.md |   8 --
     .changesets/fix_bryn_fix_metrics_typing.md    |   6 -
     .changesets/fix_dragonfly_ship_win_folder.md  |   9 --
     .changesets/fix_geal_filter_events_too.md     |   6 -
     .../fix_geal_fix_authenticated_reporting.md   |   5 -
     ..._geal_remove_clones_from_headers_plugin.md |   5 -
     .changesets/fix_geal_telemetry_perf.md        |   5 -
     .changesets/fix_igni_sigv4_http_level.md      |  12 --
     .../fix_igni_typename_fragment_interfaces.md  |   5 -
     .changesets/maint_bnjjj_caching_metrics.md    |  38 ------
     .changesets/maint_geal_metadata_cleanup.md    |   6 -
     .changesets/maint_igni_rust_1_72_0.md         |   5 -
     .changesets/maint_nicolas_otlp_filter.md      |   5 -
     .changesets/maint_simon_is_terminal.md        |   8 --
     .changesets/maint_simon_pkiup.md              |  11 --
     CHANGELOG.md                                  | 129 ++++++++++++++++++
     Cargo.lock                                    |   6 +-
     apollo-router-benchmarks/Cargo.toml           |   2 +-
     apollo-router-scaffold/Cargo.toml             |   2 +-
     .../templates/base/Cargo.toml                 |   2 +-
     .../templates/base/xtask/Cargo.toml           |   2 +-
     apollo-router/Cargo.toml                      |   2 +-
     .../tracing/docker-compose.datadog.yml        |   2 +-
     dockerfiles/tracing/docker-compose.jaeger.yml |   2 +-
     dockerfiles/tracing/docker-compose.zipkin.yml |   2 +-
     docs/source/containerization/docker.mdx       |   2 +-
     docs/source/containerization/kubernetes.mdx   |  28 ++--
     helm/chart/router/Chart.yaml                  |   4 +-
     helm/chart/router/README.md                   |   6 +-
     licenses.html                                 | 118 +++++++++++-----
     scripts/install.sh                            |   2 +-
     36 files changed, 243 insertions(+), 256 deletions(-)
     delete mode 100644 .changesets/docs_fix_broken_links.md
     delete mode 100644 .changesets/docs_geal_authorization_router_rhai.md
     delete mode 100644 .changesets/feat_garypen_2278_rhai_router_service.md
     delete mode 100644 .changesets/fix_bnjjj_fix_3683.md
     delete mode 100644 .changesets/fix_bnjjj_fix_empty_multipart_stream.md
     delete mode 100644 .changesets/fix_bnjjj_subscription_default_configuration.md
     delete mode 100644 .changesets/fix_bryn_fix_metrics_typing.md
     delete mode 100644 .changesets/fix_dragonfly_ship_win_folder.md
     delete mode 100644 .changesets/fix_geal_filter_events_too.md
     delete mode 100644 .changesets/fix_geal_fix_authenticated_reporting.md
     delete mode 100644 .changesets/fix_geal_remove_clones_from_headers_plugin.md
     delete mode 100644 .changesets/fix_geal_telemetry_perf.md
     delete mode 100644 .changesets/fix_igni_sigv4_http_level.md
     delete mode 100644 .changesets/fix_igni_typename_fragment_interfaces.md
     delete mode 100644 .changesets/maint_bnjjj_caching_metrics.md
     delete mode 100644 .changesets/maint_geal_metadata_cleanup.md
     delete mode 100644 .changesets/maint_igni_rust_1_72_0.md
     delete mode 100644 .changesets/maint_nicolas_otlp_filter.md
     delete mode 100644 .changesets/maint_simon_is_terminal.md
     delete mode 100644 .changesets/maint_simon_pkiup.md
    
    diff --git a/.changesets/docs_fix_broken_links.md b/.changesets/docs_fix_broken_links.md
    deleted file mode 100644
    index d395aa0c07..0000000000
    --- a/.changesets/docs_fix_broken_links.md
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -### Fix broken links
    -
    -This documentation change fixes an incorrect anchor link in the [CORS documentation](https://www.apollographql.com/docs/router/configuration/cors/) and removes links to authorization docs which have not yet been released. 
    -
    -By [@Meschreiber](https://github.com/Meschreiber) in https://github.com/apollographql/router/pull/3711
    diff --git a/.changesets/docs_geal_authorization_router_rhai.md b/.changesets/docs_geal_authorization_router_rhai.md
    deleted file mode 100644
    index 0e3596252b..0000000000
    --- a/.changesets/docs_geal_authorization_router_rhai.md
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -### GraphOS authorization: add an example of scope manipulation with router service level rhai ([PR #3719](https://github.com/apollographql/router/pull/3719))
    -
    -The router authorization directive `@requiresScopes` expects scopes to come from the `scope` claim in the OAuth2 access token format (https://datatracker.ietf.org/doc/html/rfc6749#section-3.3). Some tokens may have scopes stored in a different way, like an array of strings, or even in different claims. This documents a way to extract the scopes and prepare them in the right format for consumption by `@requiresScopes`, using Rhai.
    -
    -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3719
    \ No newline at end of file
    diff --git a/.changesets/feat_garypen_2278_rhai_router_service.md b/.changesets/feat_garypen_2278_rhai_router_service.md
    deleted file mode 100644
    index 526e802477..0000000000
    --- a/.changesets/feat_garypen_2278_rhai_router_service.md
    +++ /dev/null
    @@ -1,34 +0,0 @@
    -### Provide a rhai interface to the router service ([Issue #2278](https://github.com/apollographql/router/issues/2278))
    -
    -Adds `Rhai` support for the `router_service`.
    -
    -It is now possible to interact with requests and responses at the `router_service` level from `Rhai`. The functionality is very similar to that provided for interacting with existing services, for example `supergraph_service`. For instance, you may map requests and responses as follows:
    -
    -```rust
    -fn router_service(service) {
    -    const request_callback = Fn("process_request");
    -    service.map_request(request_callback);
    -    const response_callback = Fn("process_response");
    -    service.map_response(response_callback);
    -}
    -
    -```
    -The main difference from existing services is that the `router_service` deals with HTTP bodies, not well-formatted GraphQL objects. This means that `Request.body` or `Response.body` is not a well-structured object that you may interact with, but is simply a String.
    -
    -This makes it more complex to deal with Request and Response bodies with the tradeoff being that a script author has more power and can perform tasks which are just not possible within the confines of a well-formed GraphQL object.
    -
    -This simple example just logs the bodies:
    -
    -```rust
    -// Generate a log for each request at this stage
    -fn process_request(request) {
    -    print(`body: ${request.body}`);
    -}
    -
    -// Generate a log for each response at this stage
    -fn process_response(response) {
    -    print(`body: ${response.body}`);
    -}
    -```
    -
    -By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/3234
    diff --git a/.changesets/fix_bnjjj_fix_3683.md b/.changesets/fix_bnjjj_fix_3683.md
    deleted file mode 100644
    index ff130ff6f4..0000000000
    --- a/.changesets/fix_bnjjj_fix_3683.md
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -### fix(subscription): add x-accel-buffering header for multipart response ([Issue #3683](https://github.com/apollographql/router/issues/3683))
    -
    -Set `x-accel-buffering` to `no` when it's a multipart response because proxies need this configuration.
    -
    -By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/3749
    diff --git a/.changesets/fix_bnjjj_fix_empty_multipart_stream.md b/.changesets/fix_bnjjj_fix_empty_multipart_stream.md
    deleted file mode 100644
    index 68aa0de28a..0000000000
    --- a/.changesets/fix_bnjjj_fix_empty_multipart_stream.md
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -### Handle multipart stream if the original stream is empty ([Issue #3293](https://github.com/apollographql/router/issues/3293))
    -
    -For subscription and defer, if the multipart response stream is empty, it now ends correctly.
    -
    -By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/3748
    \ No newline at end of file
    diff --git a/.changesets/fix_bnjjj_subscription_default_configuration.md b/.changesets/fix_bnjjj_subscription_default_configuration.md
    deleted file mode 100644
    index 2c4a5c207a..0000000000
    --- a/.changesets/fix_bnjjj_subscription_default_configuration.md
    +++ /dev/null
    @@ -1,8 +0,0 @@
    -### fix(subscription): force the deduplication to be enabled by default as it's documented ([PR #3773](https://github.com/apollographql/router/pull/3773))
    -
    -A bug was introduced in router v1.25.0 which caused [subscription deduplication](https://www.apollographql.com/docs/router/executing-operations/subscription-support#subscription-deduplication) to be disabled by default.
    -As documented, the router will enable deduplication by default, providing you with subscriptions that scale.
    -
    -Should you decide to disable it, you can still explicitly set `enable_deduplication` to `false`.
    -
    -By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/3773
    diff --git a/.changesets/fix_bryn_fix_metrics_typing.md b/.changesets/fix_bryn_fix_metrics_typing.md
    deleted file mode 100644
    index ab4d6eef03..0000000000
    --- a/.changesets/fix_bryn_fix_metrics_typing.md
    +++ /dev/null
    @@ -1,6 +0,0 @@
    -### Fix metrics attribute types ([Issue #3687](https://github.com/apollographql/router/issues/3687))
    -
    -Metrics attributes were being coerced to strings. This is now fixed.
    -In addition, the logic around types accepted as metrics attributes has been simplified. It will log and ignore values of the wrong type.
    -
    -By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/3724
    diff --git a/.changesets/fix_dragonfly_ship_win_folder.md b/.changesets/fix_dragonfly_ship_win_folder.md
    deleted file mode 100644
    index 892df67579..0000000000
    --- a/.changesets/fix_dragonfly_ship_win_folder.md
    +++ /dev/null
    @@ -1,9 +0,0 @@
    -### Subscriptions: Correct v1.28.x regression allowing panic via un-named subscription operation
    -
    -Correct a regression that was introduced in Router v1.28.0 which made a Router **panic** possible when the following _three_ conditions are _all_ met:
    -
    -1. When sending an un-named (i.e., "anonymous") `subscription` operation (e.g., `subscription { ... }`); **and**
    -2. The Router has a `subscription` type defined in the Supergraph schema; **and**
    -3. Subscriptions are enabled (they are disabled by default) in the Router's YAML configuration, either by setting `enabled: true` _or_ by setting a `mode` within the `subscriptions` object (as seen in [the subscriptions documentation](https://www.apollographql.com/docs/router/executing-operations/subscription-support/#router-setup)).
    -
    -By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3738
    diff --git a/.changesets/fix_geal_filter_events_too.md b/.changesets/fix_geal_filter_events_too.md
    deleted file mode 100644
    index 579d457ea6..0000000000
    --- a/.changesets/fix_geal_filter_events_too.md
    +++ /dev/null
    @@ -1,6 +0,0 @@
    -### Do not record a trace if telemetry is not configured
    -
    -The OpenTelemetry handling code had a constant overhead on every request, due to the OpenTelemetryLayer recording data for every span, even when telemetry is not actually set up. We introduce a sampling filter that disables it entirely when no exporters are configured, which provides a performance boost in basic setups.
    -It also provides performance gains when exporters are set up: if a sampling ratio or client-defined sampling is used, the filter only sends the sampled traces to the rest of the stack, further reducing the overhead.
    -
    -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2999
    diff --git a/.changesets/fix_geal_fix_authenticated_reporting.md b/.changesets/fix_geal_fix_authenticated_reporting.md
    deleted file mode 100644
    index ff189e1d7f..0000000000
    --- a/.changesets/fix_geal_fix_authenticated_reporting.md
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -### Fix authenticated directive reporting ([PR #3753](https://github.com/apollographql/router/pull/3753))
    -
    -The context key for the `@authenticated` directive  only affects usage reporting
    -
    -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3753
    \ No newline at end of file
    diff --git a/.changesets/fix_geal_remove_clones_from_headers_plugin.md b/.changesets/fix_geal_remove_clones_from_headers_plugin.md
    deleted file mode 100644
    index 36881a1de4..0000000000
    --- a/.changesets/fix_geal_remove_clones_from_headers_plugin.md
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -### remove clones from the header plugin ([Issue #3068](https://github.com/apollographql/router/issues/3068))
    -
    -The list of header operations was cloned for every subgraph query, and this was increasing latency. We made sure the overhead is minimal by removing those allocations
    -
    -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3721
    \ No newline at end of file
    diff --git a/.changesets/fix_geal_telemetry_perf.md b/.changesets/fix_geal_telemetry_perf.md
    deleted file mode 100644
    index f20ceee31a..0000000000
    --- a/.changesets/fix_geal_telemetry_perf.md
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -### small performance improvements for telemetry ([PR #3656](https://github.com/apollographql/router/pull/3656))
    -
    -The SpanMetricsExporter, used to report span timings, had a few inefficiencies in the way it recognized spans, and it brought a constant overhead to router usage, even when telemetry was not configured. It has now been isolated and optimized.
    -
    -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3656
    \ No newline at end of file
    diff --git a/.changesets/fix_igni_sigv4_http_level.md b/.changesets/fix_igni_sigv4_http_level.md
    deleted file mode 100644
    index 5679499772..0000000000
    --- a/.changesets/fix_igni_sigv4_http_level.md
    +++ /dev/null
    @@ -1,12 +0,0 @@
    -### Subgraph authentication: Make sure Request signing happens after Compression and APQ ([Issue #3608](https://github.com/apollographql/router/issues/3608))
    -
    -[Subgraph authentication](https://www.apollographql.com/docs/router/configuration/authn-subgraph) is available since router v1.27.0.
    -
    -Unfortunately this first version didn't work well with features that operate with the SubgraphService, for example:
    -  - Subgraph APQ
    -  - Subgraph HTTP compression
    -  - Custom plugins that operate on the Subgraph level, written either via coprocessors, in rhai, or native.
    -
    -The router will now sign subgraph requests just before they are sent to subgraphs.
    -
    -By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3735
    diff --git a/.changesets/fix_igni_typename_fragment_interfaces.md b/.changesets/fix_igni_typename_fragment_interfaces.md
    deleted file mode 100644
    index 5ed53a9ee9..0000000000
    --- a/.changesets/fix_igni_typename_fragment_interfaces.md
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -### Deal with interfaces on fragment spreads when no __typename is queried ([Issue #2587](https://github.com/apollographql/router/issues/2587))
    -
    -Operations would over rely on the presence of __typename to resolve selection sets on interface implementers. This changeset checks for the parent type in an InlineFragment, so we don't drop relevant selection set when applicable.
    -
    -By [@o0Ignition0o](https://github.com/o0Ignition0o) and [@geal](https://github.com/geal) in https://github.com/apollographql/router/pull/3718
    diff --git a/.changesets/maint_bnjjj_caching_metrics.md b/.changesets/maint_bnjjj_caching_metrics.md
    deleted file mode 100644
    index cd09700cfd..0000000000
    --- a/.changesets/maint_bnjjj_caching_metrics.md
    +++ /dev/null
    @@ -1,38 +0,0 @@
    -### Add experimental caching metrics ([PR #3532](https://github.com/apollographql/router/pull/3532))
    -
    -A new metric is emitted only if you set `telemetry.metrics.common.experimental_cache_metrics.enabled` to `true`. It helps evaluate which entities would benefit from caching: a cache with a TTL, configurable at `telemetry.metrics.common.experimental_cache_metrics.ttl` (default: 5 seconds), is simulated, and the cache hit rate is measured per entity type and subgraph.
    -
    -example
    -
    -```
    -# HELP apollo.router.operations.entity.cache_hit
    -# TYPE apollo_router_operations_entity.cache_hit histogram
    -apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="0.05"} 0
    -apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="0.1"} 0
    -apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="0.25"} 0
    -apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="0.5"} 0
    -apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="1"} 0
    -apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="2.5"} 3
    -apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="5"} 4
    -apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="10"} 4
    -apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="20"} 4
    -apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="1000"} 4
    -apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="+Inf"} 4
    -apollo_router_operations_entity_cache_hitsum{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version=""} 7
    -apollo_router_operations_entity_cache_hitcount{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version=""} 4
    -apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="0.05"} 0
    -apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="0.1"} 0
    -apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="0.25"} 0
    -apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="0.5"} 0
    -apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="1"} 0
    -apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="2.5"} 1
    -apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="5"} 1
    -apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="10"} 1
    -apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="20"} 1
    -apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="1000"} 1
    -apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="+Inf"} 1
    -apollo_router_operations_entity_cache_hitsum{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version=""} 1
    -apollo_router_operations_entity_cache_hitcount{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version=""} 1
    -```
    -
    -By [@bnjjj](https://github.com/bnjjj) [@Geal](https://github.com/geal) in https://github.com/apollographql/router/pull/3532
    \ No newline at end of file
    diff --git a/.changesets/maint_geal_metadata_cleanup.md b/.changesets/maint_geal_metadata_cleanup.md
    deleted file mode 100644
    index ee9628f870..0000000000
    --- a/.changesets/maint_geal_metadata_cleanup.md
    +++ /dev/null
    @@ -1,6 +0,0 @@
    -### Metadata cleanup ([PR #3746](https://github.com/apollographql/router/pull/3746))
    -
    -* remove unused patch entries in Cargo.toml
    -* remove exemptions for the chrono security advisories (they are fixed now)
    -
    -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3746
    \ No newline at end of file
    diff --git a/.changesets/maint_igni_rust_1_72_0.md b/.changesets/maint_igni_rust_1_72_0.md
    deleted file mode 100644
    index 9371f0dcfa..0000000000
    --- a/.changesets/maint_igni_rust_1_72_0.md
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -### Update rust toolchain to 1.72.0 ([PR #3707](https://github.com/apollographql/router/pull/3707))
    -
    -The router-bridge update now allows us to use the latest rust version.
    -
    -By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3707
    diff --git a/.changesets/maint_nicolas_otlp_filter.md b/.changesets/maint_nicolas_otlp_filter.md
    deleted file mode 100644
    index 7bb2f7070c..0000000000
    --- a/.changesets/maint_nicolas_otlp_filter.md
    +++ /dev/null
    @@ -1,5 +0,0 @@
    -### Add support for GraphOS Cloud metrics ([Issue #3760](https://github.com/apollographql/router/issues/3760))
    -
    -Add support for GraphOS Cloud metrics in the Apollo OTLP Exporter.
    -
    -By [@nmoutschen](https://github.com/nmoutschen) in https://github.com/apollographql/router/pull/3761
    \ No newline at end of file
    diff --git a/.changesets/maint_simon_is_terminal.md b/.changesets/maint_simon_is_terminal.md
    deleted file mode 100644
    index 7295cd62d3..0000000000
    --- a/.changesets/maint_simon_is_terminal.md
    +++ /dev/null
    @@ -1,8 +0,0 @@
    -### Replace atty crate with std ([PR #3729](https://github.com/apollographql/router/pull/3729))
    -
    -The crate is unmaintained, and the standard library has equivalent functionality since Rust 1.70.0
    -
    -* https://github.com/apollographql/router/security/dependabot/68 
    -* https://doc.rust-lang.org/stable/std/io/trait.IsTerminal.html
    -
    -By [@SimonSapin](https://github.com/SimonSapin) in https://github.com/apollographql/router/pull/3729
    diff --git a/.changesets/maint_simon_pkiup.md b/.changesets/maint_simon_pkiup.md
    deleted file mode 100644
    index 8e606d8631..0000000000
    --- a/.changesets/maint_simon_pkiup.md
    +++ /dev/null
    @@ -1,11 +0,0 @@
    -### Upgrade webpki and rustls-webpki crates ([PR #3728](https://github.com/apollographql/router/pull/3728))
    -
    -Brings fixes for:
    -
    -* https://rustsec.org/advisories/RUSTSEC-2023-0052
    -* https://rustsec.org/advisories/RUSTSEC-2023-0053
    -
    -Because Apollo Router does not accept client certificates, it could only be affected
    -if a subgraph supplied a pathological TLS server certificate.
    -
    -By [@SimonSapin](https://github.com/SimonSapin) in https://github.com/apollographql/router/pull/3728
    diff --git a/CHANGELOG.md b/CHANGELOG.md
    index 26f26fb183..dbe5ba1c2d 100644
    --- a/CHANGELOG.md
    +++ b/CHANGELOG.md
    @@ -4,6 +4,135 @@ All notable changes to Router will be documented in this file.
     
     This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html).
     
    +# [1.30.0] - 2023-09-14
    +
    +## 🚀 Features
    +
    +### Rhai Support at the `router_service` ([Issue #2278](https://github.com/apollographql/router/issues/2278))
    +
    +It is now possible to interact with some aspects of requests and responses at the `router_service` level [using Rhai-based customizations](https://www.apollographql.com/docs/router/customizations/rhai/). The functionality is very similar to that provided for interacting with existing services, for example `supergraph_service`. For instance, you may "map" requests and responses as follows:
    +
    +```rust
    +fn router_service(service) {
    +    const request_callback = Fn("process_request");
    +    service.map_request(request_callback);
    +    const response_callback = Fn("process_response");
    +    service.map_response(response_callback);
    +}
    +```
    +
    +The main difference from [existing services](https://www.apollographql.com/docs/router/customizations/rhai/#router-request-lifecycle) is that the `router_service` allows operating at an HTTP transport layer rather than the more structured GraphQL representations available at later service layers, like the [supergraph service](https://www.apollographql.com/docs/router/customizations/rhai/#supergraphservice).
    +
    +Initially, we are **not** allowing access to the `body` property itself.  [This issue](https://github.com/apollographql/router/issues/3642) tracks changing that in the future.  For now, it is possible to access the `context` and `headers`.
    +
    +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/3234
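
For orientation, here is a minimal sketch of that shape, assuming the same indexed `headers` and `context` accessors that the other Rhai service levels expose; the `x-request-note` header and the `rhai::seen_at` context key are placeholder names chosen for illustration:

```rhai
// Hypothetical sketch: only headers and context are touched, since `body`
// is not exposed at the router_service level yet.
fn router_service(service) {
    service.map_request(Fn("process_request"));
    service.map_response(Fn("process_response"));
}

// Tag every incoming request with a header and remember that we saw it.
fn process_request(request) {
    request.headers["x-request-note"] = "seen-by-rhai"; // placeholder header name
    request.context["rhai::seen_at"] = "router_service"; // placeholder context key
}

// Log the context entry on the way out.
fn process_response(response) {
    print(`request was handled by ${response.context["rhai::seen_at"]}`);
}
```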
    +
    +## 🐛 Fixes
    +
    +### Small performance improvements to telemetry ([PR #3656](https://github.com/apollographql/router/pull/3656))
    +
    +We applied some small performance improvements to the `SpanMetricsExporter` (which is used to report span timings), some of which apply in cases where telemetry is disabled and could be apparent to most users.
    +
    +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3656
    +
    +### Handle interfaces in fragment spreads when `__typename` is omitted ([Issue #2587](https://github.com/apollographql/router/issues/2587))
    +
    +We now check the parent type when using an inline-fragment, rather than relying on the expectation that `__typename` will be present.  For cases where `__typename` was being omitted, this fixes responses where a portion of the selection set was silently dropped and not returned.
    +
    +By [@o0Ignition0o](https://github.com/o0Ignition0o) and [@geal](https://github.com/geal) in https://github.com/apollographql/router/pull/3718
    +
    +### Deduplication is, again, enabled by default as documented ([PR #3773](https://github.com/apollographql/router/pull/3773))
    +
    +[Subscription deduplication](https://www.apollographql.com/docs/router/executing-operations/subscription-support#subscription-deduplication) is again enabled by default as it was intended to be.  This important performance feature for subscriptions at scale was inadvertently disabled in v1.25.0 due to a bug.
    +
    +To explicitly disable deduplication, [set `enable_deduplication` to `false` in your configuration](https://www.apollographql.com/docs/router/executing-operations/subscription-support/#disabling-deduplication).
    +
    +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/3773
    +
    +### Metrics are no longer coerced incorrectly ([Issue #3687](https://github.com/apollographql/router/issues/3687))
    +
    +Metric attributes are no longer incorrectly coerced to strings.  In addition, the logic around types which are accepted as metrics attributes has been simplified to avoid this in the future.  Going forward, if the wrong type is specified, values will be ignored and a log message (at debug level) will be emitted.
    +
    +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/3724
    +
    +### Optimizations applied to header-handling operations ([Issue #3068](https://github.com/apollographql/router/issues/3068))
    +
    +Latency and overhead of passing headers to subgraph queries have been reduced.
    +
    +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3721
    +
    +### Avoid request overhead when telemetry is not enabled
    +
    +The overhead of OpenTelemetry has been removed when no tracing exporters are configured.
    +
    +This also improves performance when sampling criteria have _not_ been met, by preventing unsampled trace events from propagating to the rest of the OpenTelemetry stack.
    +
    +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2999
    +
    +### Subgraph authentication: Apply signature after compression and APQ ([Issue #3608](https://github.com/apollographql/router/issues/3608))
    +
    +The router will now _sign_ subgraph requests _just before_ they are sent to the subgraph (i.e., a bit later than previously), following up on the functionality of [subgraph authentication](https://www.apollographql.com/docs/router/configuration/authn-subgraph) which was first introduced in v1.27.0.
    +
    +This fixes interactions with:
    +
    +  - Subgraph Automatic Persisted Queries (APQ)
    +  - Subgraph HTTP compression
    +  - Custom plugins that operate on the subgraph service (whether via Co-Processors, Rhai or a compiled Rust plugin)
    +
    +In most cases, the interactions between these features and the subgraph authentication feature were problematic and required disabling one or the other in order to generate a request that was correctly signed by the signature algorithm.  This should all be resolved.
    +
    +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3735
    +
    +### Handle multipart stream if the original stream was empty ([Issue #3293](https://github.com/apollographql/router/issues/3293))
    +
    +Multi-part response streams (which are used for [subscriptions](https://www.apollographql.com/docs/router/executing-operations/subscription-support/) and operations which include the [`@defer` directive](https://www.apollographql.com/docs/router/executing-operations/defer-support/)) are now terminated correctly when the response stream is empty.
    +
    +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/3748
    +
    +### Subscriptions: Include `x-accel-buffering` header on multipart responses ([Issue #3683](https://github.com/apollographql/router/issues/3683))
    +
    +Setting the `x-accel-buffering` header to `no` for multipart responses allows certain proxies to configure themselves in a mode that is compatible with the buffering used by subscriptions.  This improves Subscriptions' compatibility with existing infrastructure.
    +
    +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/3749
    +
    +## 🛠 Maintenance
    +
    +### Our Rust Toolchain has been updated to v1.72.0 ([PR #3707](https://github.com/apollographql/router/pull/3707))
    +
    +Our Rust Toolchain has been updated to v1.72.0.  For the majority of our users (those who do not compile their own Router from source), this change will not have any impact. Otherwise, Rust 1.72.0 can now be used.
    +
    +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3707
    +
    +
    +### Replace `atty` crate with `std` ([PR #3729](https://github.com/apollographql/router/pull/3729))
    +
    +To resolve a security advisory (which did _not_ affect our usage), we've replaced `atty` with `std`: we now use the equivalent functionality of the Rust standard library, available since Rust v1.70.0.
    +
    +* https://github.com/apollographql/router/security/dependabot/68
    +* https://doc.rust-lang.org/stable/std/io/trait.IsTerminal.html
    +
    +By [@SimonSapin](https://github.com/SimonSapin) in https://github.com/apollographql/router/pull/3729
    +
    +### Upgrade `webpki` and `rustls-webpki` ([PR #3728](https://github.com/apollographql/router/pull/3728))
    +
    +These two dependency updates bring fixes for two separate security advisories:
    +
    +* https://rustsec.org/advisories/RUSTSEC-2023-0052
    +* https://rustsec.org/advisories/RUSTSEC-2023-0053
    +
    +Since Apollo Router does _not_ accept client certificates, it could only have been affected if a subgraph had provided a pathological TLS server certificate.
    +
    +By [@SimonSapin](https://github.com/SimonSapin) in https://github.com/apollographql/router/pull/3728
    +
    +## 📚 Documentation
    +
    +### GraphOS authorization: Exemplify scope manipulation with Rhai at the router service level ([PR #3719](https://github.com/apollographql/router/pull/3719))
    +
    +New [Authorization documentation](https://www.apollographql.com/docs/router/configuration/authorization/#requiresscopes) shows how to use a Rhai script to extract scopes and prepare them in the correct format for use with `@requiresScopes`.  This becomes relevant since `@requiresScopes` expects scopes to come from the `scope` claim in the [OAuth2 access token format](https://datatracker.ietf.org/doc/html/rfc6749#section-3.3), while tokens may have scopes stored differently, e.g., as an array of strings, or even in different claims.  If you have further questions on the right choice for your use case, please open a GitHub Discussion that provides an example of what you need to achieve.
    +
    +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3719
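
For orientation, a minimal sketch of such a customization, assuming the JWT claims are exposed under the `apollo_authentication::JWT::claims` context key and that the token carries its scopes as an array of strings in a hypothetical `roles` claim:

```rhai
// Hypothetical sketch: the "roles" claim name is an assumption for
// illustration; adapt it to wherever your tokens actually store scopes.
fn process_request(request) {
    let claims = request.context["apollo_authentication::JWT::claims"];
    if claims == () {
        return; // no token was presented, nothing to rewrite
    }

    // Build the space-separated `scope` claim that `@requiresScopes` expects.
    let scope = "";
    for role in claims["roles"] {
        if scope != "" {
            scope += " ";
        }
        scope += role;
    }
    claims["scope"] = scope;

    // Context values are copies, so write the modified claims back.
    request.context["apollo_authentication::JWT::claims"] = claims;
}

fn router_service(service) {
    service.map_request(Fn("process_request"));
}
```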
    +
    +
     # [1.29.1] - 2023-09-04
     
     ## 🚀 Features
    diff --git a/Cargo.lock b/Cargo.lock
    index 0232f7b0fc..9534fb8c42 100644
    --- a/Cargo.lock
    +++ b/Cargo.lock
    @@ -263,7 +263,7 @@ dependencies = [
     
     [[package]]
     name = "apollo-router"
    -version = "1.30.0-alpha.1"
    +version = "1.30.0"
     dependencies = [
      "access-json",
      "anyhow",
    @@ -409,7 +409,7 @@ dependencies = [
     
     [[package]]
     name = "apollo-router-benchmarks"
    -version = "1.30.0-alpha.1"
    +version = "1.30.0"
     dependencies = [
      "apollo-parser 0.6.1",
      "apollo-router",
    @@ -425,7 +425,7 @@ dependencies = [
     
     [[package]]
     name = "apollo-router-scaffold"
    -version = "1.30.0-alpha.1"
    +version = "1.30.0"
     dependencies = [
      "anyhow",
      "cargo-scaffold",
    diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml
    index 9ae9234517..31ed86c518 100644
    --- a/apollo-router-benchmarks/Cargo.toml
    +++ b/apollo-router-benchmarks/Cargo.toml
    @@ -1,6 +1,6 @@
     [package]
     name = "apollo-router-benchmarks"
    -version = "1.30.0-alpha.1"
    +version = "1.30.0"
     authors = ["Apollo Graph, Inc. "]
     edition = "2021"
     license = "Elastic-2.0"
    diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml
    index a58f350a3b..6290d88949 100644
    --- a/apollo-router-scaffold/Cargo.toml
    +++ b/apollo-router-scaffold/Cargo.toml
    @@ -1,6 +1,6 @@
     [package]
     name = "apollo-router-scaffold"
    -version = "1.30.0-alpha.1"
    +version = "1.30.0"
     authors = ["Apollo Graph, Inc. "]
     edition = "2021"
     license = "Elastic-2.0"
    diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml
    index 2d6220f94c..27cb00b340 100644
    --- a/apollo-router-scaffold/templates/base/Cargo.toml
    +++ b/apollo-router-scaffold/templates/base/Cargo.toml
    @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" }
     apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" }
     {{else}}
     # Note if you update these dependencies then also update xtask/Cargo.toml
    -apollo-router = "1.30.0-alpha.1"
    +apollo-router = "1.30.0"
     {{/if}}
     {{/if}}
     async-trait = "0.1.52"
    diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml
    index aaa1dff37b..fb2f1d2470 100644
    --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml
    +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml
    @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" }
     {{#if branch}}
     apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" }
     {{else}}
    -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.30.0-alpha.1" }
    +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.30.0" }
     {{/if}}
     {{/if}}
     anyhow = "1.0.58"
    diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml
    index 87ea47b7e4..9635c4b9ed 100644
    --- a/apollo-router/Cargo.toml
    +++ b/apollo-router/Cargo.toml
    @@ -1,6 +1,6 @@
     [package]
     name = "apollo-router"
    -version = "1.30.0-alpha.1"
    +version = "1.30.0"
     authors = ["Apollo Graph, Inc. "]
     repository = "https://github.com/apollographql/router/"
     documentation = "https://docs.rs/apollo-router"
    diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml
    index 9355430d4b..1fe9e4facc 100644
    --- a/dockerfiles/tracing/docker-compose.datadog.yml
    +++ b/dockerfiles/tracing/docker-compose.datadog.yml
    @@ -3,7 +3,7 @@ services:
     
       apollo-router:
         container_name: apollo-router
    -    image: ghcr.io/apollographql/router:v1.30.0-alpha.1
    +    image: ghcr.io/apollographql/router:v1.30.0
         volumes:
           - ./supergraph.graphql:/etc/config/supergraph.graphql
           - ./router/datadog.router.yaml:/etc/config/configuration.yaml
    diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml
    index 21b302be20..85230d7e3e 100644
    --- a/dockerfiles/tracing/docker-compose.jaeger.yml
    +++ b/dockerfiles/tracing/docker-compose.jaeger.yml
    @@ -4,7 +4,7 @@ services:
       apollo-router:
         container_name: apollo-router
         #build: ./router
    -    image: ghcr.io/apollographql/router:v1.30.0-alpha.1
    +    image: ghcr.io/apollographql/router:v1.30.0
         volumes:
           - ./supergraph.graphql:/etc/config/supergraph.graphql
           - ./router/jaeger.router.yaml:/etc/config/configuration.yaml
    diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml
    index 6e6657c280..d31d365126 100644
    --- a/dockerfiles/tracing/docker-compose.zipkin.yml
    +++ b/dockerfiles/tracing/docker-compose.zipkin.yml
    @@ -4,7 +4,7 @@ services:
       apollo-router:
         container_name: apollo-router
         build: ./router
    -    image: ghcr.io/apollographql/router:v1.30.0-alpha.1
    +    image: ghcr.io/apollographql/router:v1.30.0
         volumes:
           - ./supergraph.graphql:/etc/config/supergraph.graphql
           - ./router/zipkin.router.yaml:/etc/config/configuration.yaml
    diff --git a/docs/source/containerization/docker.mdx b/docs/source/containerization/docker.mdx
    index dcee10e6dd..9ddd11a47a 100644
    --- a/docs/source/containerization/docker.mdx
    +++ b/docs/source/containerization/docker.mdx
    @@ -11,7 +11,7 @@ The default behaviour of the router images is suitable for a quickstart or devel
     
     Note: The [docker documentation](https://docs.docker.com/engine/reference/run/) for the run command may be helpful when reading through the examples.
     
    -Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.30.0-alpha.1`
    +Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.30.0`
     
     ## Override the configuration
     
    diff --git a/docs/source/containerization/kubernetes.mdx b/docs/source/containerization/kubernetes.mdx
    index ef3774a62f..68a0fea56b 100644
    --- a/docs/source/containerization/kubernetes.mdx
    +++ b/docs/source/containerization/kubernetes.mdx
    @@ -13,7 +13,7 @@ import { Link } from 'gatsby';
     
     [Helm](https://helm.sh) is the package manager for kubernetes.
     
    -There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.30.0-alpha.1/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes.
    +There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.30.0/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes.
     
     In both the following examples, we are using helm to install the router:
      - into namespace "router-deploy" (create namespace if it doesn't exist)
    @@ -64,10 +64,10 @@ kind: ServiceAccount
     metadata:
       name: release-name-router
       labels:
    -    helm.sh/chart: router-1.30.0-alpha.1
    +    helm.sh/chart: router-1.30.0
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.30.0-alpha.1"
    +    app.kubernetes.io/version: "v1.30.0"
         app.kubernetes.io/managed-by: Helm
     ---
     # Source: router/templates/secret.yaml
    @@ -76,10 +76,10 @@ kind: Secret
     metadata:
       name: "release-name-router"
       labels:
    -    helm.sh/chart: router-1.30.0-alpha.1
    +    helm.sh/chart: router-1.30.0
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.30.0-alpha.1"
    +    app.kubernetes.io/version: "v1.30.0"
         app.kubernetes.io/managed-by: Helm
     data:
       managedFederationApiKey: "UkVEQUNURUQ="
    @@ -90,10 +90,10 @@ kind: ConfigMap
     metadata:
       name: release-name-router
       labels:
    -    helm.sh/chart: router-1.30.0-alpha.1
    +    helm.sh/chart: router-1.30.0
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.30.0-alpha.1"
    +    app.kubernetes.io/version: "v1.30.0"
         app.kubernetes.io/managed-by: Helm
     data:
       configuration.yaml: |
    @@ -117,10 +117,10 @@ kind: Service
     metadata:
       name: release-name-router
       labels:
    -    helm.sh/chart: router-1.30.0-alpha.1
    +    helm.sh/chart: router-1.30.0
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.30.0-alpha.1"
    +    app.kubernetes.io/version: "v1.30.0"
         app.kubernetes.io/managed-by: Helm
     spec:
       type: ClusterIP
    @@ -143,10 +143,10 @@ kind: Deployment
     metadata:
       name: release-name-router
       labels:
    -    helm.sh/chart: router-1.30.0-alpha.1
    +    helm.sh/chart: router-1.30.0
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.30.0-alpha.1"
    +    app.kubernetes.io/version: "v1.30.0"
         app.kubernetes.io/managed-by: Helm
       
       annotations:
    @@ -174,7 +174,7 @@ spec:
             - name: router
               securityContext:
                 {}
    -          image: "ghcr.io/apollographql/router:v1.30.0-alpha.1"
    +          image: "ghcr.io/apollographql/router:v1.30.0"
               imagePullPolicy: IfNotPresent
               args:
                 - --hot-reload
    @@ -226,10 +226,10 @@ kind: Pod
     metadata:
       name: "release-name-router-test-connection"
       labels:
    -    helm.sh/chart: router-1.30.0-alpha.1
    +    helm.sh/chart: router-1.30.0
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.30.0-alpha.1"
    +    app.kubernetes.io/version: "v1.30.0"
         app.kubernetes.io/managed-by: Helm
       annotations:
         "helm.sh/hook": test
    diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml
    index 8209043e7d..07dbc45a6a 100644
    --- a/helm/chart/router/Chart.yaml
    +++ b/helm/chart/router/Chart.yaml
    @@ -20,10 +20,10 @@ type: application
     # so it matches the shape of our release process and release automation.
     # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix
     # of "v" is not included.
    -version: 1.30.0-alpha.1
    +version: 1.30.0
     
     # This is the version number of the application being deployed. This version number should be
     # incremented each time you make changes to the application. Versions are not expected to
     # follow Semantic Versioning. They should reflect the version the application is using.
     # It is recommended to use it with quotes.
    -appVersion: "v1.30.0-alpha.1"
    \ No newline at end of file
    +appVersion: "v1.30.0"
    \ No newline at end of file
    diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md
    index d93957af8d..d31ac65966 100644
    --- a/helm/chart/router/README.md
    +++ b/helm/chart/router/README.md
    @@ -2,7 +2,7 @@
     
     [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation
     
    -![Version: 1.30.0-alpha.1](https://img.shields.io/badge/Version-1.30.0--alpha.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.30.0-alpha.1](https://img.shields.io/badge/AppVersion-v1.30.0--alpha.1-informational?style=flat-square)
    +![Version: 1.30.0](https://img.shields.io/badge/Version-1.30.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.30.0](https://img.shields.io/badge/AppVersion-v1.30.0-informational?style=flat-square)
     
     ## Prerequisites
     
    @@ -11,7 +11,7 @@
     ## Get Repo Info
     
     ```console
    -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.30.0-alpha.1
    +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.30.0
     ```
     
     ## Install Chart
    @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.30.0-alpha.
     **Important:** only helm3 is supported
     
     ```console
    -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.30.0-alpha.1 --values my-values.yaml
    +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.30.0 --values my-values.yaml
     ```
     
     _See [configuration](#configuration) below._
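Once a release is installed, you can confirm which chart and app version it is running; this sketch assumes the release went into the `router-deploy` namespace used in the examples above:

```console
# List releases in the target namespace along with their chart and app versions.
helm list --namespace router-deploy
```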
    diff --git a/licenses.html b/licenses.html
    index 744d6ecffe..28c09f483c 100644
    --- a/licenses.html
    +++ b/licenses.html
@@ -47,11 +47,11 @@
 Overview of licenses:

 MIT License (96)
 Apache License 2.0 (62)
 BSD 3-Clause "New" or "Revised" License (10)
-ISC License (10)
+ISC License (9)
+Mozilla Public License 2.0 (3)
+Creative Commons Zero v1.0 Universal (2)
 Elastic License 2.0 (2)
-Mozilla Public License 2.0 (2)
 BSD 2-Clause "Simplified" License (1)
-Creative Commons Zero v1.0 Universal (1)
 OpenSSL License (1)
 Unicode License Agreement - Data Files and Software (2016) (1)
@@ -10494,7 +10494,9 @@
 Apache License 2.0

 Used by:

 ../../LICENSE-APACHE
@@ -11146,9 +11148,7 @@
 Apache License 2.0

 Used by:

 apollo-compiler
-apollo-encoder
 apollo-parser
-apollo-smith
 curve25519-dalek-derive
 deadpool-runtime
 deno-proc-macro-rules
@@ -11778,6 +11778,53 @@
 Creative Commons Zero v1.0 Universal

 Used by:

+Creative Commons CC0 1.0 Universal
+
      +<<beginOptional;name=ccOptionalIntro>> CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED HEREUNDER.  <<endOptional>>
      +
      +Statement of Purpose
      +
      +The laws of most jurisdictions throughout the world automatically confer exclusive Copyright and Related Rights (defined below) upon the creator and subsequent owner(s) (each and all, an "owner") of an original work of authorship and/or a database (each, a "Work").
      +
      +Certain owners wish to permanently relinquish those rights to a Work for the purpose of contributing to a commons of creative, cultural and scientific works ("Commons") that the public can reliably and without fear of later claims of infringement build upon, modify, incorporate in other works, reuse and redistribute as freely as possible in any form whatsoever and for any purposes, including without limitation commercial purposes. These owners may contribute to the Commons to promote the ideal of a free culture and the further production of creative, cultural and scientific works, or to gain reputation or greater distribution for their Work in part through the use and efforts of others.
      +
      +For these and/or other purposes and motivations, and without any expectation of additional consideration or compensation, the person associating CC0 with a Work (the "Affirmer"), to the extent that he or she is an owner of Copyright and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and publicly distribute the Work under its terms, with knowledge of his or her Copyright and Related Rights in the Work and the meaning and intended legal effect of CC0 on those rights.
      +
      +1. Copyright and Related Rights. A Work made available under CC0 may be protected by copyright and related or neighboring rights ("Copyright and Related Rights"). Copyright and Related Rights include, but are not limited to, the following:
      +
      +     i. the right to reproduce, adapt, distribute, perform, display, communicate, and translate a Work;
      +
      +     ii. moral rights retained by the original author(s) and/or performer(s);
      +
      +     iii. publicity and privacy rights pertaining to a person's image or likeness depicted in a Work;
      +
      +     iv. rights protecting against unfair competition in regards to a Work, subject to the limitations in paragraph 4(a), below;
      +
      +     v. rights protecting the extraction, dissemination, use and reuse of data in a Work;
      +
      +     vi. database rights (such as those arising under Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, and under any national implementation thereof, including any amended or successor version of such directive); and
      +
      +     vii. other similar, equivalent or corresponding rights throughout the world based on applicable law or treaty, and any national implementations thereof.
      +
      +2. Waiver. To the greatest extent permitted by, but not in contravention of, applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and unconditionally waives, abandons, and surrenders all of Affirmer's Copyright and Related Rights and associated claims and causes of action, whether now known or unknown (including existing as well as future claims and causes of action), in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each member of the public at large and to the detriment of Affirmer's heirs and successors, fully intending that such Waiver shall not be subject to revocation, rescission, cancellation, termination, or any other legal or equitable action to disrupt the quiet enjoyment of the Work by the public as contemplated by Affirmer's express Statement of Purpose.
      +
      +3. Public License Fallback. Should any part of the Waiver for any reason be judged legally invalid or ineffective under applicable law, then the Waiver shall be preserved to the maximum extent permitted taking into account Affirmer's express Statement of Purpose. In addition, to the extent the Waiver is so judged Affirmer hereby grants to each affected person a royalty-free, non transferable, non sublicensable, non exclusive, irrevocable and unconditional license to exercise Affirmer's Copyright and Related Rights in the Work (i) in all territories worldwide, (ii) for the maximum duration provided by applicable law or treaty (including future time extensions), (iii) in any current or future medium and for any number of copies, and (iv) for any purpose whatsoever, including without limitation commercial, advertising or promotional purposes (the "License"). The License shall be deemed effective as of the date CC0 was applied by Affirmer to the Work. Should any part of the License for any reason be judged legally invalid or ineffective under applicable law, such partial invalidity or ineffectiveness shall not invalidate the remainder of the License, and in such case Affirmer hereby affirms that he or she will not (i) exercise any of his or her remaining Copyright and Related Rights in the Work or (ii) assert any associated claims and causes of action with respect to the Work, in either case contrary to Affirmer's express Statement of Purpose.
      +
      +4. Limitations and Disclaimers.
      +
      +     a. No trademark or patent rights held by Affirmer are waived, abandoned, surrendered, licensed or otherwise affected by this document.
      +
      +     b. Affirmer offers the Work as-is and makes no representations or warranties of any kind concerning the Work, express, implied, statutory or otherwise, including without limitation warranties of title, merchantability, fitness for a particular purpose, non infringement, or the absence of latent or other defects, accuracy, or the present or absence of errors, whether or not discoverable, all to the greatest extent permissible under applicable law.
      +
      +     c. Affirmer disclaims responsibility for clearing rights of other persons that may apply to the Work or any use thereof, including without limitation any person's Copyright and Related Rights in the Work. Further, Affirmer disclaims responsibility for obtaining any necessary consents, permissions or other rights required for any use of the Work.
      +
      +     d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. 
+
+Creative Commons Zero v1.0 Universal
+
+Used by:
+
 Creative Commons Legal Code
@@ -12203,36 +12250,6 @@
 Used by:

 // WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN // ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF // OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-ISC License
-
-Used by:
-
-// Copyright 2021 Brian Smith.
      -//
      -// Permission to use, copy, modify, and/or distribute this software for any
      -// purpose with or without fee is hereby granted, provided that the above
      -// copyright notice and this permission notice appear in all copies.
      -//
      -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
      -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
      -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR
      -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
      -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
      -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
      -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
      -
      -#[test]
      -fn cert_without_extensions_test() {
      -    // Check the certificate is valid with
      -    // `openssl x509 -in cert_without_extensions.der -inform DER -text -noout`
      -    const CERT_WITHOUT_EXTENSIONS_DER: &[u8] = include_bytes!("cert_without_extensions.der");
      -
      -    assert!(webpki::EndEntityCert::try_from(CERT_WITHOUT_EXTENSIONS_DER).is_ok());
      -}
       
@@ -12329,6 +12346,7 @@
 ISC License

 Used by:

 ISC License:
@@ -15502,7 +15520,6 @@
 Mozilla Public License 2.0

 Used by:

 Mozilla Public License Version 2.0
 ==================================
@@ -15877,6 +15894,35 @@
 Used by:

 This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0.
+
+Mozilla Public License 2.0
+
+Used by:
+
+This packge contains a modified version of ca-bundle.crt:
      +
      +ca-bundle.crt -- Bundle of CA Root Certificates
      +
      +Certificate data from Mozilla as of: Thu Nov  3 19:04:19 2011#
      +This is a bundle of X.509 certificates of public Certificate Authorities
      +(CA). These were automatically extracted from Mozilla's root certificates
      +file (certdata.txt).  This file can be found in the mozilla source tree:
      +http://mxr.mozilla.org/mozilla/source/security/nss/lib/ckfw/builtins/certdata.txt?raw=1#
      +It contains the certificates in PEM format and therefore
      +can be directly used with curl / libcurl / php_curl, or with
      +an Apache+mod_ssl webserver for SSL client authentication.
      +Just configure this file as the SSLCACertificateFile.#
      +
      +***** BEGIN LICENSE BLOCK *****
      +This Source Code Form is subject to the terms of the Mozilla Public License,
      +v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
      +one at http://mozilla.org/MPL/2.0/.
      +
      +***** END LICENSE BLOCK *****
      +@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $
       
diff --git a/scripts/install.sh b/scripts/install.sh
index a545a24875..318035641b 100755
--- a/scripts/install.sh
+++ b/scripts/install.sh
@@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa

 # Router version defined in apollo-router's Cargo.toml
 # Note: Change this line manually during the release steps.
-PACKAGE_VERSION="v1.30.0-alpha.1"
+PACKAGE_VERSION="v1.30.0"

 download_binary() {
     downloader --check
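For reference, the two variables touched above are what `install.sh` combines into the release download URL. The sketch below only approximates that URL: the asset filename (platform triple and archive extension) is an assumption, not taken from the script, and the full value of `BINARY_DOWNLOAD_PREFIX` is completed from the truncated line shown in the diff.

```console
# Sketch only: approximate the download URL built from the script's variables.
# The asset filename pattern below is an assumption, not taken from the script.
PACKAGE_VERSION="v1.30.0"
BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/download"
echo "${BINARY_DOWNLOAD_PREFIX}/${PACKAGE_VERSION}/router-${PACKAGE_VERSION}-x86_64-unknown-linux-gnu.tar.gz"
```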