From 8b95abdcf5265df78bfda7462c628c35d65dfb20 Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Mon, 21 Aug 2023 13:17:07 +0100 Subject: [PATCH 01/81] If there are extraLabels add them to all resources This extends the functionality of `extraLabels` so that, if they are defined, they will be templated for all resources created by the chart. Previously, they were only templated onto the Deployment resource. --- helm/chart/router/templates/configmap.yaml | 3 +++ helm/chart/router/templates/deployment.yaml | 4 ++-- helm/chart/router/templates/hpa.yaml | 3 +++ helm/chart/router/templates/ingress.yaml | 3 +++ helm/chart/router/templates/pdb.yaml | 3 +++ helm/chart/router/templates/secret.yaml | 3 +++ helm/chart/router/templates/service.yaml | 3 +++ helm/chart/router/templates/serviceaccount.yaml | 3 +++ helm/chart/router/templates/serviceentry.yaml | 3 +++ helm/chart/router/templates/supergraph-cm.yaml | 3 +++ helm/chart/router/templates/tests/test-connection.yaml | 3 +++ helm/chart/router/templates/virtualservice.yaml | 3 +++ 12 files changed, 35 insertions(+), 2 deletions(-) diff --git a/helm/chart/router/templates/configmap.yaml b/helm/chart/router/templates/configmap.yaml index a51f1e6be3..e7caffecaf 100644 --- a/helm/chart/router/templates/configmap.yaml +++ b/helm/chart/router/templates/configmap.yaml @@ -9,6 +9,9 @@ metadata: name: {{ $routerFullName }} labels: {{- include "router.labels" . | nindent 4 }} + {{- if .Values.extraLabels }} + {{- include "common.templatizeExtraLabels" . | trim | nindent 4 }} + {{- end }} data: configuration.yaml: | {{- toYaml $configuration | nindent 4 }} diff --git a/helm/chart/router/templates/deployment.yaml b/helm/chart/router/templates/deployment.yaml index 2e5962a509..1f6f10c08a 100644 --- a/helm/chart/router/templates/deployment.yaml +++ b/helm/chart/router/templates/deployment.yaml @@ -5,7 +5,7 @@ metadata: labels: {{- include "router.labels" . | nindent 4 }} {{- if .Values.extraLabels }} - {{- include "common.templatizeExtraLabels" . | nindent 4 }} + {{- include "common.templatizeExtraLabels" . | trim | nindent 4 }} {{- end }} {{/* There may not be much configuration so check that there is something */}} {{- if (((((.Values.router).configuration).telemetry).metrics).prometheus).enabled }} @@ -35,7 +35,7 @@ spec: labels: {{- include "router.selectorLabels" . | nindent 8 }} {{- if .Values.extraLabels }} - {{- include "common.templatizeExtraLabels" . | nindent 8 }} + {{- include "common.templatizeExtraLabels" . | trim | nindent 8 }} {{- end }} spec: {{- with .Values.imagePullSecrets }} diff --git a/helm/chart/router/templates/hpa.yaml b/helm/chart/router/templates/hpa.yaml index 8a7891da03..b9e1eeb5a6 100644 --- a/helm/chart/router/templates/hpa.yaml +++ b/helm/chart/router/templates/hpa.yaml @@ -9,6 +9,9 @@ metadata: name: {{ include "router.fullname" . }} labels: {{- include "router.labels" . | nindent 4 }} + {{- if .Values.extraLabels }} + {{- include "common.templatizeExtraLabels" . | trim | nindent 4 }} + {{- end }} spec: scaleTargetRef: apiVersion: apps/v1 diff --git a/helm/chart/router/templates/ingress.yaml b/helm/chart/router/templates/ingress.yaml index 355862b2d3..5f4bd976e1 100644 --- a/helm/chart/router/templates/ingress.yaml +++ b/helm/chart/router/templates/ingress.yaml @@ -7,6 +7,9 @@ metadata: name: {{ $fullName }} labels: {{- include "router.labels" . | nindent 4 }} + {{- if .Values.extraLabels }} + {{- include "common.templatizeExtraLabels" . 
| trim | nindent 4 }} + {{- end }} {{- with .Values.ingress.annotations }} annotations: {{- toYaml . | nindent 4 }} diff --git a/helm/chart/router/templates/pdb.yaml b/helm/chart/router/templates/pdb.yaml index 518fa5aea5..359a13af4d 100644 --- a/helm/chart/router/templates/pdb.yaml +++ b/helm/chart/router/templates/pdb.yaml @@ -6,6 +6,9 @@ metadata: name: {{ include "router.fullname" . }} labels: {{- include "router.labels" . | nindent 4 }} + {{- if .Values.extraLabels }} + {{- include "common.templatizeExtraLabels" . | trim | nindent 4 }} + {{- end }} spec: {{- if .Values.podDisruptionBudget.minAvailable }} minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} diff --git a/helm/chart/router/templates/secret.yaml b/helm/chart/router/templates/secret.yaml index 3fbecd863e..9f5a4b124d 100644 --- a/helm/chart/router/templates/secret.yaml +++ b/helm/chart/router/templates/secret.yaml @@ -5,6 +5,9 @@ metadata: name: {{ template "router.managedFederation.apiSecretName" . }} labels: {{- include "router.labels" . | nindent 4 }} + {{- if .Values.extraLabels }} + {{- include "common.templatizeExtraLabels" . | trim | nindent 4 }} + {{- end }} data: managedFederationApiKey: {{ default "MISSING" .Values.managedFederation.apiKey | b64enc | quote }} {{- end }} diff --git a/helm/chart/router/templates/service.yaml b/helm/chart/router/templates/service.yaml index 83990d91ef..57f59a2108 100644 --- a/helm/chart/router/templates/service.yaml +++ b/helm/chart/router/templates/service.yaml @@ -4,6 +4,9 @@ metadata: name: {{ include "router.fullname" . }} labels: {{- include "router.labels" . | nindent 4 }} + {{- if .Values.extraLabels }} + {{- include "common.templatizeExtraLabels" . | trim | nindent 4 }} + {{- end }} {{- with .Values.service.annotations }} annotations: {{- toYaml . | nindent 4 }} diff --git a/helm/chart/router/templates/serviceaccount.yaml b/helm/chart/router/templates/serviceaccount.yaml index 203f012bb0..b15b8eaf96 100644 --- a/helm/chart/router/templates/serviceaccount.yaml +++ b/helm/chart/router/templates/serviceaccount.yaml @@ -5,6 +5,9 @@ metadata: name: {{ include "router.serviceAccountName" . }} labels: {{- include "router.labels" . | nindent 4 }} + {{- if .Values.extraLabels }} + {{- include "common.templatizeExtraLabels" . | trim | nindent 4 }} + {{- end }} {{- with .Values.serviceAccount.annotations }} annotations: {{- toYaml . | nindent 4 }} diff --git a/helm/chart/router/templates/serviceentry.yaml b/helm/chart/router/templates/serviceentry.yaml index 774eddeb31..ddc43407b4 100644 --- a/helm/chart/router/templates/serviceentry.yaml +++ b/helm/chart/router/templates/serviceentry.yaml @@ -12,6 +12,9 @@ metadata: labels: app.fullName: {{ $fullName }} {{- include "router.labels" . | nindent 4 }} + {{- if .Values.extraLabels }} + {{- include "common.templatizeExtraLabels" . | trim | nindent 4 }} + {{- end }} {{- with .Values.serviceentry.annotations }} annotations: {{- toYaml . | nindent 4 }} diff --git a/helm/chart/router/templates/supergraph-cm.yaml b/helm/chart/router/templates/supergraph-cm.yaml index 40ff9d073a..3a5f2a362a 100644 --- a/helm/chart/router/templates/supergraph-cm.yaml +++ b/helm/chart/router/templates/supergraph-cm.yaml @@ -6,6 +6,9 @@ metadata: name: {{ $routerFullName }}-supergraph labels: {{- include "router.labels" . | nindent 4 }} + {{- if .Values.extraLabels }} + {{- include "common.templatizeExtraLabels" . 
| trim | nindent 4 }} + {{- end }} data: supergraph-schema.graphql: |- {{ .Values.supergraphFile | indent 4 }} diff --git a/helm/chart/router/templates/tests/test-connection.yaml b/helm/chart/router/templates/tests/test-connection.yaml index 0bf0612719..67b939bea0 100644 --- a/helm/chart/router/templates/tests/test-connection.yaml +++ b/helm/chart/router/templates/tests/test-connection.yaml @@ -4,6 +4,9 @@ metadata: name: "{{ include "router.fullname" . }}-test-connection" labels: {{- include "router.labels" . | nindent 4 }} + {{- if .Values.extraLabels }} + {{- include "common.templatizeExtraLabels" . | trim | nindent 4 }} + {{- end }} annotations: "helm.sh/hook": test spec: diff --git a/helm/chart/router/templates/virtualservice.yaml b/helm/chart/router/templates/virtualservice.yaml index d8a77afede..fe3ef21db2 100644 --- a/helm/chart/router/templates/virtualservice.yaml +++ b/helm/chart/router/templates/virtualservice.yaml @@ -11,6 +11,9 @@ metadata: namespace: {{ .Values.virtualservice.namespace }} labels: {{- include "router.labels" . | nindent 4 }} + {{- if .Values.extraLabels }} + {{- include "common.templatizeExtraLabels" . | trim | nindent 4 }} + {{- end }} {{- with .Values.ingress.annotations }} annotations: {{- toYaml . | nindent 4 }} From ff245d4541505fba5c64190fdeecf786fc056fa3 Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Mon, 21 Aug 2023 13:22:47 +0100 Subject: [PATCH 02/81] add labels to servicemonitor They were completely missing previously, labels and extraLabels. --- helm/chart/router/templates/servicemonitor.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/helm/chart/router/templates/servicemonitor.yaml b/helm/chart/router/templates/servicemonitor.yaml index a4654a1add..61f9c88368 100644 --- a/helm/chart/router/templates/servicemonitor.yaml +++ b/helm/chart/router/templates/servicemonitor.yaml @@ -3,6 +3,11 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: {{ .Release.Name }} + labels: + {{- include "router.labels" . | nindent 4 }} + {{- if .Values.extraLabels }} + {{- include "common.templatizeExtraLabels" . | trim | nindent 4 }} + {{- end }} spec: endpoints: - path: {{ include "router.prometheusMetricsPath" . }} From 81c7a773f9e8a6050d25a312adce20ca3f54b827 Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Mon, 21 Aug 2023 13:29:33 +0100 Subject: [PATCH 03/81] add a changeset --- .changesets/fix_garypen_helm_extra_labels.md | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .changesets/fix_garypen_helm_extra_labels.md diff --git a/.changesets/fix_garypen_helm_extra_labels.md b/.changesets/fix_garypen_helm_extra_labels.md new file mode 100644 index 0000000000..a370db43c1 --- /dev/null +++ b/.changesets/fix_garypen_helm_extra_labels.md @@ -0,0 +1,7 @@ +### Helm: If there are `extraLabels` add them to all resources ([PR #3622](https://github.com/apollographql/router/pull/3622)) + +This extends the functionality of `extraLabels` so that, if they are defined, they will be templated for all resources created by the chart. + +Previously, they were only templated onto the `Deployment` resource. 
+ +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/3622 \ No newline at end of file From 1a242e06620645e3ad3001ce8861c1076969f4c0 Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Tue, 22 Aug 2023 08:26:48 +0100 Subject: [PATCH 04/81] add co-author Co-authored-by: Bjoern Weidlich From 1aa8adf78f93a075d5fdbf48e719abbd927aaf61 Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Tue, 22 Aug 2023 08:39:41 +0100 Subject: [PATCH 05/81] update changeset to add co-author --- .changesets/fix_garypen_helm_extra_labels.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changesets/fix_garypen_helm_extra_labels.md b/.changesets/fix_garypen_helm_extra_labels.md index a370db43c1..4082915ff1 100644 --- a/.changesets/fix_garypen_helm_extra_labels.md +++ b/.changesets/fix_garypen_helm_extra_labels.md @@ -4,4 +4,4 @@ This extends the functionality of `extraLabels` so that, if they are defined, th Previously, they were only templated onto the `Deployment` resource. -By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/3622 \ No newline at end of file +By [@garypen](https://github.com/garypen) and [@bjoernw](https://github.com/bjoernw) in https://github.com/apollographql/router/pull/3622 From db2fa189a31013eae74f17e940a0e2ce3f97ab5f Mon Sep 17 00:00:00 2001 From: bryn Date: Tue, 29 Aug 2023 10:31:21 +0100 Subject: [PATCH 06/81] Only save CircleCI caches when running against dev --- .circleci/config.yml | 38 ++++++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 99093b5889..840e33a254 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -370,10 +370,14 @@ commands: keys: - "<< pipeline.parameters.merge_version >>-lint" - run: xtask lint - - save_cache: - key: "<< pipeline.parameters.merge_version >>-lint" - paths: - - target + - when: + condition: + equal: [ "dev", "<< pipeline.git.branch >>" ] + steps: + - save_cache: + key: "<< pipeline.parameters.merge_version >>-lint" + paths: + - target xtask_check_helm: steps: @@ -415,11 +419,14 @@ commands: # cargo-deny fetches a rustsec advisory DB, which has to happen on github.com over https - run: git config --global --unset-all url.ssh://git@github.com.insteadof - run: xtask check-compliance - - save_cache: - key: "<< pipeline.parameters.merge_version >>-compliance" - paths: - - target - + - when: + condition: + equal: [ "dev", "<< pipeline.git.branch >>" ] + steps: + - save_cache: + key: "<< pipeline.parameters.merge_version >>-compliance" + paths: + - target xtask_test: parameters: variant: @@ -437,11 +444,14 @@ commands: command: | find target/debug/deps -type f -size +50M -delete rm target/debug/router* - - save_cache: - key: "<< pipeline.parameters.merge_version >>-test-<< parameters.variant >>" - paths: - - target - + - when: + condition: + equal: [ "dev", "<< pipeline.git.branch >>" ] + steps: + - save_cache: + key: "<< pipeline.parameters.merge_version >>-test-<< parameters.variant >>" + paths: + - target jobs: lint: environment: From 4ebf850212a77979aca033cafff7d1a9f0e3a3e8 Mon Sep 17 00:00:00 2001 From: bryn Date: Mon, 28 Aug 2023 19:21:49 +0100 Subject: [PATCH 07/81] The metrics layer was soercing all metrics attributes to string. This is now fixed. In addition, logic that would have printed to stderr has been removed, and instead metrics values and attributes that are the wrong type are now silently ignored. 
Fixes #3687 --- apollo-router/src/configuration/metrics.rs | 54 +++++++++-- ...etrics__test__metrics@apq.router.yaml.snap | 6 +- ...st__metrics@authorization.router.yaml.snap | 4 +- ...@authorization_directives.router.yaml.snap | 4 +- ...test__metrics@coprocessor.router.yaml.snap | 12 +-- ...s__test__metrics@entities.router.yaml.snap | 18 ++-- ...ics__test__metrics@limits.router.yaml.snap | 16 ++-- ...metrics@persisted_queries.router.yaml.snap | 6 +- ...st__metrics@subscriptions.router.yaml.snap | 10 +- ...__test__metrics@telemetry.router.yaml.snap | 12 +-- ...__metrics@traffic_shaping.router.yaml.snap | 16 ++-- .../src/plugins/telemetry/metrics/layer.rs | 91 ++++++++++--------- 12 files changed, 148 insertions(+), 101 deletions(-) diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs index 0e8d5e74fb..eb459b92e9 100644 --- a/apollo-router/src/configuration/metrics.rs +++ b/apollo-router/src/configuration/metrics.rs @@ -5,6 +5,7 @@ use std::time::Duration; use jsonpath_rust::JsonPathInst; use paste::paste; +use serde::Serialize; use serde_json::Value; use tokio::sync::OwnedSemaphorePermit; @@ -16,7 +17,39 @@ pub(crate) struct MetricsHandle { pub(crate) struct Metrics { yaml: Value, - metrics: HashMap)>, + metrics: HashMap)>, +} + +enum AttributeValue { + Bool(bool), + U64(u64), + I64(i64), + F64(f64), + String(String), +} + +impl Serialize for AttributeValue { + fn serialize(&self, serializer: S) -> Result { + match self { + AttributeValue::Bool(value) => serializer.serialize_bool(*value), + AttributeValue::U64(value) => serializer.serialize_u64(*value), + AttributeValue::I64(value) => serializer.serialize_i64(*value), + AttributeValue::F64(value) => serializer.serialize_f64(*value), + AttributeValue::String(value) => serializer.serialize_str(value), + } + } +} + +impl AttributeValue { + fn dyn_value(self: &AttributeValue) -> &dyn tracing::Value { + match self { + AttributeValue::Bool(value) => value as &dyn tracing::Value, + AttributeValue::U64(value) => value as &dyn tracing::Value, + AttributeValue::I64(value) => value as &dyn tracing::Value, + AttributeValue::F64(value) => value as &dyn tracing::Value, + AttributeValue::String(value) => value as &dyn tracing::Value, + } + } } impl Metrics { @@ -98,12 +131,19 @@ impl Metrics { let attr_name = stringify!([<$($attr __ )+>]).to_string(); match JsonPathInst::from_str($attr_path).expect("json path must be valid").find_slice(value).into_iter().next().as_deref() { // If the value is an object we can only state that it is set, but not what it is set to. - Some(Value::Object(_value)) => {attributes.insert(attr_name, "true".to_string());}, - Some(Value::Array(value)) if !value.is_empty() => {attributes.insert(attr_name, "true".to_string());}, + Some(Value::Object(_value)) => {attributes.insert(attr_name, AttributeValue::Bool(true));}, + Some(Value::Array(value)) if !value.is_empty() => {attributes.insert(attr_name, AttributeValue::Bool(true));}, // Scalars can be logged as is. 
- Some(value) => {attributes.insert(attr_name, value.to_string());}, + Some(Value::Number(value)) if value.is_f64() => {attributes.insert(attr_name, AttributeValue::F64(value.as_f64().expect("checked, qed")));}, + Some(Value::Number(value)) if value.is_i64() => {attributes.insert(attr_name, AttributeValue::I64(value.as_i64().expect("checked, qed")));}, + Some(Value::Number(value)) => {attributes.insert(attr_name, AttributeValue::U64(value.as_u64().expect("checked, qed")));}, + Some(Value::String(value)) => {attributes.insert(attr_name, AttributeValue::String(value.clone()));}, + Some(Value::Bool(value)) => {attributes.insert(attr_name, AttributeValue::Bool(*value));}, + // If the value is not set we don't specify the attribute. - None => {attributes.insert(attr_name, "false".to_string());}, + None => {attributes.insert(attr_name, AttributeValue::Bool(false));}, + + _ => {}, };)+ (1, attributes) } @@ -113,7 +153,7 @@ impl Metrics { let mut attributes = HashMap::new(); $( let attr_name = stringify!([<$($attr __ )+>]).to_string(); - attributes.insert(attr_name, "false".to_string()); + attributes.insert(attr_name, AttributeValue::Bool(false)); )+ (0, attributes) } @@ -122,7 +162,7 @@ impl Metrics { // Now log the metric paste!{ - tracing::info!($($metric).+ = metric.0, $($($attr).+ = metric.1.get(stringify!([<$($attr __ )+>])).expect("attribute must be in map")),+); + tracing::info!($($metric).+ = metric.0, $($($attr).+ = metric.1.get(stringify!([<$($attr __ )+>])).expect("attribute must be in map").dyn_value()),+); } }; } diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap index bf5efaf603..9108dfc7a1 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap @@ -4,7 +4,7 @@ expression: "&metrics.metrics" --- value.apollo.router.config.apq: - 1 - - opt__router__cache__in_memory__: "true" - opt__router__cache__redis__: "true" - opt__subgraph__: "true" + - opt__router__cache__in_memory__: true + opt__router__cache__redis__: true + opt__subgraph__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap index 11f9160614..e45a4962f7 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap @@ -4,6 +4,6 @@ expression: "&metrics.metrics" --- value.apollo.router.config.authorization: - 1 - - opt__directives__: "false" - opt__require_authentication__: "true" + - opt__directives__: false + opt__require_authentication__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap index 61b5d4c144..38462ec606 100644 --- 
a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap @@ -4,6 +4,6 @@ expression: "&metrics.metrics" --- value.apollo.router.config.authorization: - 1 - - opt__directives__: "true" - opt__require_authentication__: "false" + - opt__directives__: true + opt__require_authentication__: false diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap index b5eb1df764..bdc1a7899b 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap @@ -4,10 +4,10 @@ expression: "&metrics.metrics" --- value.apollo.router.config.coprocessor: - 1 - - opt__router__request__: "true" - opt__router__response__: "true" - opt__subgraph__request__: "true" - opt__subgraph__response__: "true" - opt__supergraph__request__: "false" - opt__supergraph__response__: "false" + - opt__router__request__: true + opt__router__response__: true + opt__subgraph__request__: true + opt__subgraph__response__: true + opt__supergraph__request__: false + opt__supergraph__response__: false diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap index 1bce92d5c8..e4fe10d957 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap @@ -4,15 +4,15 @@ expression: "&metrics.metrics" --- value.apollo.router.config.entities: - 1 - - opt__cache__: "true" + - opt__cache__: true value.apollo.router.config.traffic_shaping: - 1 - - opt__router__rate_limit__: "false" - opt__router__timout__: "false" - opt__subgraph__compression__: "false" - opt__subgraph__deduplicate_query__: "false" - opt__subgraph__http2__: "false" - opt__subgraph__rate_limit__: "false" - opt__subgraph__retry__: "false" - opt__subgraph__timeout__: "false" + - opt__router__rate_limit__: false + opt__router__timout__: false + opt__subgraph__compression__: false + opt__subgraph__deduplicate_query__: false + opt__subgraph__http2__: false + opt__subgraph__rate_limit__: false + opt__subgraph__retry__: false + opt__subgraph__timeout__: false diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap index 53807bab66..055f60152d 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap @@ -4,12 +4,12 @@ expression: "&metrics.metrics" --- value.apollo.router.config.limits: - 1 - - 
opt__operation__max_aliases__: "true" - opt__operation__max_depth__: "true" - opt__operation__max_height__: "true" - opt__operation__max_root_fields__: "true" - opt__operation__warn_only__: "true" - opt__parser__max_recursion__: "true" - opt__parser__max_tokens__: "true" - opt__request__max_size__: "true" + - opt__operation__max_aliases__: true + opt__operation__max_depth__: true + opt__operation__max_height__: true + opt__operation__max_root_fields__: true + opt__operation__warn_only__: true + opt__parser__max_recursion__: true + opt__parser__max_tokens__: true + opt__request__max_size__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap index 507f9c756f..72b803ca49 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap @@ -4,7 +4,7 @@ expression: "&metrics.metrics" --- value.apollo.router.config.persisted_queries: - 1 - - opt__log_unknown__: "true" - opt__safelist__enabled__: "true" - opt__safelist__require_id__: "true" + - opt__log_unknown__: true + opt__safelist__enabled__: true + opt__safelist__require_id__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap index 3709a1603d..a019d34928 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap @@ -4,9 +4,9 @@ expression: "&metrics.metrics" --- value.apollo.router.config.subscriptions: - 1 - - opt__deduplication__: "false" - opt__max_opened__: "true" - opt__mode__callback__: "true" - opt__mode__passthrough__: "true" - opt__queue_capacity__: "true" + - opt__deduplication__: false + opt__max_opened__: true + opt__mode__callback__: true + opt__mode__passthrough__: true + opt__queue_capacity__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap index 7e02cf7f31..8ea0c00cab 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap @@ -4,10 +4,10 @@ expression: "&metrics.metrics" --- value.apollo.router.config.telemetry: - 1 - - opt__metrics__otlp__: "true" - opt__metrics__prometheus__: "true" - opt__tracing__datadog__: "true" - opt__tracing__jaeger__: "true" - opt__tracing__otlp__: "true" - opt__tracing__zipkin__: "true" + - opt__metrics__otlp__: true + opt__metrics__prometheus__: true + opt__tracing__datadog__: true + opt__tracing__jaeger__: true + opt__tracing__otlp__: true + opt__tracing__zipkin__: true diff --git 
a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap index 1cdb685e7d..ab53cd0460 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap @@ -4,12 +4,12 @@ expression: "&metrics.metrics" --- value.apollo.router.config.traffic_shaping: - 1 - - opt__router__rate_limit__: "true" - opt__router__timout__: "true" - opt__subgraph__compression__: "true" - opt__subgraph__deduplicate_query__: "true" - opt__subgraph__http2__: "true" - opt__subgraph__rate_limit__: "true" - opt__subgraph__retry__: "true" - opt__subgraph__timeout__: "true" + - opt__router__rate_limit__: true + opt__router__timout__: true + opt__subgraph__compression__: true + opt__subgraph__deduplicate_query__: true + opt__subgraph__http2__: true + opt__subgraph__rate_limit__: true + opt__subgraph__retry__: true + opt__subgraph__timeout__: true diff --git a/apollo-router/src/plugins/telemetry/metrics/layer.rs b/apollo-router/src/plugins/telemetry/metrics/layer.rs index c195891d7b..c5b5e6bd8c 100644 --- a/apollo-router/src/plugins/telemetry/metrics/layer.rs +++ b/apollo-router/src/plugins/telemetry/metrics/layer.rs @@ -24,8 +24,6 @@ use super::METRIC_PREFIX_HISTOGRAM; use super::METRIC_PREFIX_MONOTONIC_COUNTER; use super::METRIC_PREFIX_VALUE; -const I64_MAX: u64 = i64::MAX as u64; - #[derive(Default)] pub(crate) struct Instruments { u64_counter: MetricsMap>, @@ -162,66 +160,75 @@ pub(crate) struct MetricVisitor<'a> { } impl<'a> Visit for MetricVisitor<'a> { - fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { - // Do not display the log content - if field.name() != "message" { + fn record_f64(&mut self, field: &Field, value: f64) { + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + self.metric = Some((metric_name, InstrumentType::CounterF64(value))); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + self.metric = Some((metric_name, InstrumentType::UpDownCounterF64(value))); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + self.metric = Some((metric_name, InstrumentType::HistogramF64(value))); + } else { self.custom_attributes.push(KeyValue::new( Key::from_static_str(field.name()), - Value::from(format!("{value:?}")), + Value::from(value), )); } } - fn record_str(&mut self, field: &Field, value: &str) { - self.custom_attributes.push(KeyValue::new( - Key::from_static_str(field.name()), - Value::from(value.to_string()), - )); + fn record_i64(&mut self, field: &Field, value: i64) { + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + tracing::error!(metric_name, "monotonic counter must be u64"); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + self.metric = Some((metric_name, InstrumentType::UpDownCounterI64(value))); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + self.metric = Some((metric_name, InstrumentType::HistogramI64(value))); + } else { + self.custom_attributes.push(KeyValue::new( + Key::from_static_str(field.name()), + Value::from(value), + )); + } } fn 
record_u64(&mut self, field: &Field, value: u64) { if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { self.metric = Some((metric_name, InstrumentType::CounterU64(value))); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - if value <= I64_MAX { - self.metric = Some((metric_name, InstrumentType::UpDownCounterI64(value as i64))); - } else { - eprintln!( - "[tracing-opentelemetry]: Received Counter metric, but \ - provided u64: {value} is greater than i64::MAX. Ignoring \ - this metric." - ); - } } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { self.metric = Some((metric_name, InstrumentType::HistogramU64(value))); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { self.metric = Some((metric_name, InstrumentType::GaugeU64(value))); - } else { - self.record_debug(field, &value); } } - fn record_f64(&mut self, field: &Field, value: f64) { - if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - self.metric = Some((metric_name, InstrumentType::CounterF64(value))); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - self.metric = Some((metric_name, InstrumentType::UpDownCounterF64(value))); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - self.metric = Some((metric_name, InstrumentType::HistogramF64(value))); - } else { - self.record_debug(field, &value); - } + fn record_i128(&mut self, field: &Field, _value: i128) { + tracing::error!(name = field.name(), "metric attribute cannot be i128"); } - fn record_i64(&mut self, field: &Field, value: i64) { - if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - self.metric = Some((metric_name, InstrumentType::CounterU64(value as u64))); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - self.metric = Some((metric_name, InstrumentType::UpDownCounterI64(value))); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - self.metric = Some((metric_name, InstrumentType::HistogramI64(value))); - } else { - self.record_debug(field, &value); + fn record_u128(&mut self, field: &Field, _value: u128) { + tracing::error!(name = field.name(), "metric attribute cannot be u128"); + } + + fn record_bool(&mut self, field: &Field, value: bool) { + self.custom_attributes.push(KeyValue::new( + Key::from_static_str(field.name()), + Value::from(value), + )); + } + + fn record_str(&mut self, field: &Field, value: &str) { + self.custom_attributes.push(KeyValue::new( + Key::from_static_str(field.name()), + Value::from(value.to_string()), + )); + } + + fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { + // Do not display the log content + if field.name() != "message" { + self.custom_attributes.push(KeyValue::new( + Key::from_static_str(field.name()), + Value::from(format!("{value:?}")), + )); } } } From 4109a2ec374d3a161c489f234343c79fcdca34ae Mon Sep 17 00:00:00 2001 From: bryn Date: Mon, 28 Aug 2023 20:32:09 +0100 Subject: [PATCH 08/81] Fix metrics attribute types Metrics attributes were being coerced to strings. This is now fixed. In addition, the logic around types accepted as metrics attributes has been simplified. It will log and ignore values of the wrong type. 
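As a minimal sketch of the simplified rules (the `cache.hit` attribute below is illustrative; the other field names come from this patch): monotonic counters accept u64 or f64, up/down counters i64 or f64, histograms u64, i64 or f64, gauges u64, and attributes keep their native i64, f64, string or bool types instead of being stringified. Values of any other type are logged and ignored.

```rust
// Illustrative call site; `cache.hit` is a hypothetical attribute name.
tracing::info!(
    // monotonic counters must be u64 (or f64)
    monotonic_counter.apollo.router.operations = 1u64,
    // attributes are recorded with their native types rather than as strings
    http.response.status_code = 200i64,
    cache.hit = true,
);
```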
Fixes #3691 --- .changesets/fix_bryn_fix_metrics_typing.md | 6 ++++++ .../src/plugins/telemetry/metrics/layer.rs | 15 ++++++++++++--- 2 files changed, 18 insertions(+), 3 deletions(-) create mode 100644 .changesets/fix_bryn_fix_metrics_typing.md diff --git a/.changesets/fix_bryn_fix_metrics_typing.md b/.changesets/fix_bryn_fix_metrics_typing.md new file mode 100644 index 0000000000..b4f26d67ed --- /dev/null +++ b/.changesets/fix_bryn_fix_metrics_typing.md @@ -0,0 +1,6 @@ +### Fix metrics attribute types ([Issue #3687](https://github.com/apollographql/router/issues/3687)) + +Metrics attributes were being coerced to strings. This is now fixed. +In addition, the logic around types accepted as metrics attributes has been simplified. It will log and ignore values of the wrong type. + +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/3701 diff --git a/apollo-router/src/plugins/telemetry/metrics/layer.rs b/apollo-router/src/plugins/telemetry/metrics/layer.rs index c5b5e6bd8c..63795ea035 100644 --- a/apollo-router/src/plugins/telemetry/metrics/layer.rs +++ b/apollo-router/src/plugins/telemetry/metrics/layer.rs @@ -177,7 +177,10 @@ impl<'a> Visit for MetricVisitor<'a> { fn record_i64(&mut self, field: &Field, value: i64) { if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - tracing::error!(metric_name, "monotonic counter must be u64"); + tracing::error!( + metric_name, + "monotonic counter must be u64, this metric will be ignored" + ); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { self.metric = Some((metric_name, InstrumentType::UpDownCounterI64(value))); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { @@ -201,11 +204,17 @@ impl<'a> Visit for MetricVisitor<'a> { } fn record_i128(&mut self, field: &Field, _value: i128) { - tracing::error!(name = field.name(), "metric attribute cannot be i128"); + tracing::error!( + name = field.name(), + "metric attribute cannot be i128, this attribute will be ignored" + ); } fn record_u128(&mut self, field: &Field, _value: u128) { - tracing::error!(name = field.name(), "metric attribute cannot be u128"); + tracing::error!( + name = field.name(), + "metric attribute cannot be u128, this attribute will be ignored" + ); } fn record_bool(&mut self, field: &Field, value: bool) { From 9aabf8d0535b72628369eefe1e4343754346b13b Mon Sep 17 00:00:00 2001 From: bryn Date: Tue, 29 Aug 2023 14:54:14 +0100 Subject: [PATCH 09/81] Ensure that errors from metrics layer are only output when it is actually a metric being handled. To make this work metric attributes MUST be declared after the metric value. This is checked via a cheap boolean indicating if we have already ignored attributes upton metric initialization. 
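A minimal sketch of the ordering requirement (the field names here are illustrative): attributes seen before any metric field cannot be associated with a metric, so the metric value has to be declared first.

```rust
// Correct: metric value first, attributes after it.
tracing::info!(
    monotonic_counter.apollo.router.example = 1u64,
    kind = "query",
);

// Incorrect: the attribute precedes the metric value, so it is dropped and an
// error is logged once the metric field is visited.
tracing::info!(
    kind = "query",
    monotonic_counter.apollo.router.example = 1u64,
);
```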
Fixes #3691 --- .../src/plugins/telemetry/metrics/layer.rs | 215 +++++++++++++++--- apollo-router/src/plugins/telemetry/mod.rs | 2 +- 2 files changed, 187 insertions(+), 30 deletions(-) diff --git a/apollo-router/src/plugins/telemetry/metrics/layer.rs b/apollo-router/src/plugins/telemetry/metrics/layer.rs index 63795ea035..f19c1cd691 100644 --- a/apollo-router/src/plugins/telemetry/metrics/layer.rs +++ b/apollo-router/src/plugins/telemetry/metrics/layer.rs @@ -157,21 +157,41 @@ pub(crate) struct MetricVisitor<'a> { pub(crate) metric: Option<(&'static str, InstrumentType)>, pub(crate) custom_attributes: Vec, pub(crate) meter: &'a Meter, + attributes_ignored: bool, +} + +impl<'a> MetricVisitor<'a> { + fn set_metric(&mut self, name: &'static str, instrument_type: InstrumentType) { + self.metric = Some((name, instrument_type)); + if self.attributes_ignored { + tracing::error!( + metric_name = name, + "metric attributes must be declared after the metric value. Some attributes have been ignored" + ); + } + } } impl<'a> Visit for MetricVisitor<'a> { fn record_f64(&mut self, field: &Field, value: f64) { if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - self.metric = Some((metric_name, InstrumentType::CounterF64(value))); + self.set_metric(metric_name, InstrumentType::CounterF64(value)); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - self.metric = Some((metric_name, InstrumentType::UpDownCounterF64(value))); + self.set_metric(metric_name, InstrumentType::UpDownCounterF64(value)); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - self.metric = Some((metric_name, InstrumentType::HistogramF64(value))); - } else { + self.set_metric(metric_name, InstrumentType::HistogramF64(value)); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + tracing::error!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { self.custom_attributes.push(KeyValue::new( Key::from_static_str(field.name()), Value::from(value), )); + } else { + self.attributes_ignored = true } } @@ -179,65 +199,201 @@ impl<'a> Visit for MetricVisitor<'a> { if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { tracing::error!( metric_name, - "monotonic counter must be u64, this metric will be ignored" + "monotonic counter must be u64 or f64. This metric will be ignored" ); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - self.metric = Some((metric_name, InstrumentType::UpDownCounterI64(value))); + self.set_metric(metric_name, InstrumentType::UpDownCounterI64(value)); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - self.metric = Some((metric_name, InstrumentType::HistogramI64(value))); - } else { + self.set_metric(metric_name, InstrumentType::HistogramI64(value)); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + tracing::error!( + metric_name, + "gauge must be u64. 
This metric will be ignored" + ); + } else if self.metric.is_some() { self.custom_attributes.push(KeyValue::new( Key::from_static_str(field.name()), Value::from(value), )); + } else { + self.attributes_ignored = true } } fn record_u64(&mut self, field: &Field, value: u64) { if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - self.metric = Some((metric_name, InstrumentType::CounterU64(value))); + self.set_metric(metric_name, InstrumentType::CounterU64(value)); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + tracing::error!( + metric_name, + "counter must be i64. This metric will be ignored" + ); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - self.metric = Some((metric_name, InstrumentType::HistogramU64(value))); + self.set_metric(metric_name, InstrumentType::HistogramU64(value)); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { - self.metric = Some((metric_name, InstrumentType::GaugeU64(value))); + self.set_metric(metric_name, InstrumentType::GaugeU64(value)); + } else if self.metric.is_some() { + tracing::error!( + name = field.name(), + "metric attribute must be i64, f64, string or bool. This attribute will be ignored" + ); + } else { + self.attributes_ignored = true } } fn record_i128(&mut self, field: &Field, _value: i128) { - tracing::error!( - name = field.name(), - "metric attribute cannot be i128, this attribute will be ignored" - ); + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + tracing::error!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + tracing::error!( + metric_name, + "counter must be i64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + tracing::error!( + metric_name, + "histogram must be u64, i64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + tracing::error!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { + tracing::error!( + name = field.name(), + "metric attribute must be i64, f64, string or bool. This attribute will be ignored" + ); + } else { + self.attributes_ignored = true + } } fn record_u128(&mut self, field: &Field, _value: u128) { - tracing::error!( - name = field.name(), - "metric attribute cannot be u128, this attribute will be ignored" - ); + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + tracing::error!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + tracing::error!( + metric_name, + "counter must be i64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + tracing::error!( + metric_name, + "histogram must be u64, i64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + tracing::error!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { + tracing::error!( + name = field.name(), + "metric attribute must be i64, f64, string or bool. 
This attribute will be ignored" + ); + } else { + self.attributes_ignored = true + } } fn record_bool(&mut self, field: &Field, value: bool) { - self.custom_attributes.push(KeyValue::new( - Key::from_static_str(field.name()), - Value::from(value), - )); + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + tracing::error!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + tracing::error!( + metric_name, + "counter must be i64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + tracing::error!( + metric_name, + "histogram must be u64, i64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + tracing::error!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { + self.custom_attributes.push(KeyValue::new( + Key::from_static_str(field.name()), + Value::from(value), + )); + } else { + self.attributes_ignored = true + } } fn record_str(&mut self, field: &Field, value: &str) { - self.custom_attributes.push(KeyValue::new( - Key::from_static_str(field.name()), - Value::from(value.to_string()), - )); + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + tracing::error!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + tracing::error!( + metric_name, + "counter must be i64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + tracing::error!( + metric_name, + "histogram must be u64, i64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + tracing::error!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { + self.custom_attributes.push(KeyValue::new( + Key::from_static_str(field.name()), + Value::from(value.to_string()), + )); + } else { + self.attributes_ignored = true + } } fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { - // Do not display the log content - if field.name() != "message" { + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + tracing::error!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + tracing::error!( + metric_name, + "counter must be i64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + tracing::error!( + metric_name, + "histogram must be u64, i64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + tracing::error!( + metric_name, + "gauge must be u64. 
This metric will be ignored" + ); + } else if self.metric.is_some() { self.custom_attributes.push(KeyValue::new( Key::from_static_str(field.name()), Value::from(format!("{value:?}")), )); + } else { + self.attributes_ignored = true } } } @@ -281,6 +437,7 @@ where meter: &self.meter, metric: None, custom_attributes: Vec::new(), + attributes_ignored: false, }; event.record(&mut metric_visitor); metric_visitor.finish(); diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index b61d5fbf21..14cfbd1d5c 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -840,7 +840,7 @@ impl Telemetry { } ::tracing::info!( monotonic_counter.apollo.router.operations = 1u64, - http.response.status_code = parts.status.as_u16(), + http.response.status_code = parts.status.as_u16() as i64, ); let response = http::Response::from_parts( parts, From 92593a1a28d44d6f4e868138f2aaf8976f5d6c43 Mon Sep 17 00:00:00 2001 From: bryn Date: Tue, 29 Aug 2023 15:20:54 +0100 Subject: [PATCH 10/81] Update router bridge counter metrics to correct type --- .../src/query_planner/bridge_query_planner.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index e4929420de..7805d127f9 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -91,7 +91,7 @@ impl BridgeQueryPlanner { if has_validation_errors && !schema.has_errors() { tracing::warn!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_SCHEMA, validation.result = VALIDATION_FALSE_NEGATIVE, "validation mismatch: JS query planner reported a schema validation error, but apollo-rs did not" @@ -106,7 +106,7 @@ impl BridgeQueryPlanner { if configuration.experimental_graphql_validation_mode == GraphQLValidationMode::Both { if schema.has_errors() { tracing::warn!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_SCHEMA, validation.result = VALIDATION_FALSE_POSITIVE, "validation mismatch: apollo-rs reported a schema validation error, but JS query planner did not" @@ -114,7 +114,7 @@ impl BridgeQueryPlanner { } else { // false_negative was an early return so we know it was correct here tracing::info!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_SCHEMA, validation.result = VALIDATION_MATCH ); @@ -286,7 +286,7 @@ impl BridgeQueryPlanner { match (is_validation_error, &selections.validation_error) { (false, Some(_)) => { tracing::warn!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_OPERATION, validation.result = VALIDATION_FALSE_POSITIVE, "validation mismatch: JS query planner did not report query validation error, but apollo-rs did" @@ -294,7 +294,7 @@ impl BridgeQueryPlanner { } (true, None) => { tracing::warn!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_OPERATION, validation.result = VALIDATION_FALSE_NEGATIVE, "validation mismatch: apollo-rs did not report query validation error, but JS query planner did" @@ 
-302,7 +302,7 @@ impl BridgeQueryPlanner { } // if JS and Rust implementations agree, we return the JS result for now. _ => tracing::info!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_OPERATION, validation.result = VALIDATION_MATCH, ), From e32cab7e22346531ea202b3ff6f15e8ae7e7fca3 Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Tue, 29 Aug 2023 16:20:09 +0100 Subject: [PATCH 11/81] Uplink connections now reuse reqwest client (#3703) Previously uplink requests created a new reqwest client each time, this may cause CPU spikes especially on OSX. Fixes #3333 **Checklist** Complete the checklist (and note appropriate exceptions) before a final PR is raised. - [ ] Changes are compatible[^1] - [ ] Documentation[^2] completed - [ ] Performance impact assessed and acceptable - Tests added and passing[^3] - [ ] Unit Tests - [ ] Integration Tests - [ ] Manual Tests **Exceptions** *Note any exceptions here* **Notes** [^1]. It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this. [^2]. Configuration is an important part of many changes. Where applicable please try to document configuration examples. [^3]. Tick whichever testing boxes are applicable. If you are adding Manual Tests: - please document the manual testing (extensively) in the Exceptions. - please raise a separate issue to automate the test and label it (or ask for it to be labeled) as `manual test` Co-authored-by: bryn --- .changesets/maint_bryn_uplink_client.md | 6 +++++ apollo-router/src/uplink/mod.rs | 29 +++++++++++++++---------- 2 files changed, 23 insertions(+), 12 deletions(-) create mode 100644 .changesets/maint_bryn_uplink_client.md diff --git a/.changesets/maint_bryn_uplink_client.md b/.changesets/maint_bryn_uplink_client.md new file mode 100644 index 0000000000..7c9776f106 --- /dev/null +++ b/.changesets/maint_bryn_uplink_client.md @@ -0,0 +1,6 @@ +### Uplink connections now reuse reqwest client ([Issue #3333](https://github.com/apollographql/router/issues/3333)) + +Previously uplink requests created a new reqwest client each time, this may cause CPU spikes especially on OSX. +A single client will now be shared between requests of the same type. 
+ +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/3703 \ No newline at end of file diff --git a/apollo-router/src/uplink/mod.rs b/apollo-router/src/uplink/mod.rs index 8f10a44c28..b72ef91935 100644 --- a/apollo-router/src/uplink/mod.rs +++ b/apollo-router/src/uplink/mod.rs @@ -4,6 +4,7 @@ use std::time::Duration; use std::time::Instant; use futures::Stream; +use futures::StreamExt; use graphql_client::QueryBody; use thiserror::Error; use tokio::sync::mpsc::channel; @@ -169,6 +170,17 @@ where { let query = query_name::(); let (sender, receiver) = channel(2); + let client = match reqwest::Client::builder() + .timeout(uplink_config.timeout) + .build() + { + Ok(client) => client, + Err(err) => { + tracing::error!("unable to create client to query uplink: {err}", err = err); + return futures::stream::empty().boxed(); + } + }; + let task = async move { let mut last_id = None; let mut endpoints = uplink_config.endpoints.unwrap_or_default(); @@ -181,13 +193,7 @@ where let query_body = Query::build_query(variables.into()); - match fetch::( - &query_body, - &mut endpoints.iter(), - uplink_config.timeout, - ) - .await - { + match fetch::(&client, &query_body, &mut endpoints.iter()).await { Ok(response) => { tracing::info!( counter.apollo_router_uplink_fetch_count_total = 1, @@ -255,13 +261,13 @@ where }; drop(tokio::task::spawn(task.with_current_subscriber())); - ReceiverStream::new(receiver) + ReceiverStream::new(receiver).boxed() } pub(crate) async fn fetch( + client: &reqwest::Client, request_body: &QueryBody, urls: &mut impl Iterator, - timeout: Duration, ) -> Result, Error> where Query: graphql_client::GraphQLQuery, @@ -272,7 +278,7 @@ where let query = query_name::(); for url in urls { let now = Instant::now(); - match http_request::(url.as_str(), request_body, timeout).await { + match http_request::(client, url.as_str(), request_body).await { Ok(response) => { let response = response.data.map(Into::into); match &response { @@ -352,14 +358,13 @@ fn query_name() -> &'static str { } async fn http_request( + client: &reqwest::Client, url: &str, request_body: &QueryBody, - timeout: Duration, ) -> Result, reqwest::Error> where Query: graphql_client::GraphQLQuery, { - let client = reqwest::Client::builder().timeout(timeout).build()?; // It is possible that istio-proxy is re-configuring networking beneath us. 
If it is, we'll see an error something like this: // level: "ERROR" // message: "fetch failed from all endpoints" From e84f93d9e161779e57b1646845ae07840b5f9e34 Mon Sep 17 00:00:00 2001 From: Edward Huang <18322228+shorgi@users.noreply.github.com> Date: Tue, 29 Aug 2023 22:02:48 -0700 Subject: [PATCH 12/81] Clarify that metrics are available for Prometheus and OpenTelemetry (#3706) --- docs/source/configuration/metrics.mdx | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/source/configuration/metrics.mdx b/docs/source/configuration/metrics.mdx index bd6d2d37c4..a250824e5b 100644 --- a/docs/source/configuration/metrics.mdx +++ b/docs/source/configuration/metrics.mdx @@ -17,7 +17,7 @@ telemetry: # (Optional) service_namespace: "apollo" prometheus: - # By setting this endpoint you enable the prometheus exporter + # By setting this endpoint you enable the Prometheus exporter # All our endpoints exposed by plugins are namespaced by the name of the plugin enabled: true listen: 127.0.0.1:9090 @@ -26,8 +26,8 @@ telemetry: ### Using in a containers environment -The prometheus endpoint listens to 127.0.0.1 by default, which won't allow connections issued from a network. -While this is a safe default, _other containers won't be able to access the prometheus endpoint_, which will disable metric scraping. +The Prometheus endpoint listens to 127.0.0.1 by default, which won't allow connections issued from a network. +While this is a safe default, _other containers won't be able to access the Prometheus endpoint_, which will disable metric scraping. You can change this by setting: @@ -35,7 +35,7 @@ You can change this by setting: telemetry: metrics: prometheus: - # By setting this endpoint you enable other containers and pods to access the prometheus endpoint + # By setting this endpoint you enable other containers and pods to access the Prometheus endpoint enabled: true listen: 0.0.0.0:9090 path: /metrics @@ -58,7 +58,7 @@ apollo_router_http_request_duration_seconds_bucket{le="0.9"} 1 ### Available metrics -The following metrics are available when using Prometheus. Attributes are listed where applicable. +The following metrics are available for Prometheus and OpenTelemetry. Attributes are listed where applicable. 
#### HTTP From 602bf6488bfc83d6373c92f9d75feb48278e332c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 30 Aug 2023 11:40:52 +0300 Subject: [PATCH 13/81] fix(deps): update all non-major packages >= 1.0 (#3627) --- Cargo.lock | 132 ++++--------- apollo-router-scaffold/Cargo.toml | 2 +- apollo-router/Cargo.toml | 18 +- .../datadog-subgraph/package-lock.json | 28 +-- .../tracing/datadog-subgraph/package.json | 2 +- .../tracing/jaeger-subgraph/package-lock.json | 186 +++++++++--------- .../tracing/jaeger-subgraph/package.json | 2 +- .../tracing/zipkin-subgraph/package-lock.json | 186 +++++++++--------- .../tracing/zipkin-subgraph/package.json | 2 +- xtask/Cargo.lock | 52 ++--- xtask/Cargo.toml | 6 +- 11 files changed, 277 insertions(+), 339 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9b05f4eabd..87b468f0df 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,16 +146,15 @@ dependencies = [ [[package]] name = "anstream" -version = "0.3.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" +checksum = "b1f58811cfac344940f1a400b6e6231ce35171f614f26439e80f8c1465c5cc0c" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", - "is-terminal", "utf8parse", ] @@ -185,9 +184,9 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "1.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c" +checksum = "58f54d10c6dfa51283a066ceab3ec1ab78d13fae00aa49243a45e4571fb79dfd" dependencies = [ "anstyle", "windows-sys 0.48.0", @@ -296,7 +295,7 @@ dependencies = [ "buildstructor 0.5.3", "bytes", "ci_info", - "clap 4.3.23", + "clap 4.4.1", "console-subscriber", "dashmap", "derivative", @@ -317,7 +316,7 @@ dependencies = [ "http", "http-body", "http-serde", - "humantime 2.1.0", + "humantime", "humantime-serde", "hyper", "hyper-rustls", @@ -440,7 +439,7 @@ version = "1.28.1" dependencies = [ "anyhow", "cargo-scaffold", - "clap 4.3.23", + "clap 4.4.1", "copy_dir", "regex", "str_inflector", @@ -1357,9 +1356,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.23" +version = "4.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03aef18ddf7d879c15ce20f04826ef8418101c7e528014c3eeea13321047dca3" +checksum = "7c8d502cbaec4595d2e7d5f61e318f05417bd2b66fdc3809498f0d3fdf0bea27" dependencies = [ "clap_builder", "clap_derive", @@ -1368,9 +1367,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.23" +version = "4.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ce6fffb678c9b80a70b6b6de0aad31df727623a70fd9a842c30cd573e2fa98" +checksum = "5891c7bc0edb3e1c2204fc5e94009affabeb1821c9e5fdc3959536c5c0bb984d" dependencies = [ "anstream", "anstyle", @@ -1380,9 +1379,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.3.12" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" +checksum = "c9fd1a5729c4548118d7d70ff234a44868d00489a4b6597b0b020918a0e91a1a" dependencies = [ "heck 0.4.1", "proc-macro2", @@ -1537,7 +1536,7 @@ dependencies = [ "crossbeam-utils", "futures", "hdrhistogram", - "humantime 2.1.0", + "humantime", "prost-types", "serde", "serde_json", @@ -1678,7 +1677,7 @@ 
dependencies = [ "anes", "cast", "ciborium", - "clap 4.3.23", + "clap 4.4.1", "criterion-plot", "futures", "is-terminal", @@ -1850,9 +1849,9 @@ dependencies = [ [[package]] name = "dashmap" -version = "5.5.1" +version = "5.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edd72493923899c6f10c641bdbdeddc7183d6396641d99c1a0d1597f37f92e28" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if", "hashbrown 0.14.0", @@ -2308,26 +2307,13 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "env_logger" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -dependencies = [ - "atty", - "humantime 1.3.0", - "log", - "regex", - "termcolor", -] - [[package]] name = "env_logger" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0" dependencies = [ - "humantime 2.1.0", + "humantime", "is-terminal", "log", "regex", @@ -2570,9 +2556,9 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "fred" -version = "6.3.0" +version = "6.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e02c21b098d77b0e99fe0054ebd3e7c9f81bffb42aa843021415ffa793124a6" +checksum = "dca2a979eaeb5d8a819edc193860ce54797730559464bc253cd3a2f765e58bd5" dependencies = [ "arc-swap", "arcstr", @@ -2585,12 +2571,11 @@ dependencies = [ "lazy_static", "log", "parking_lot 0.12.1", - "pretty_env_logger", "rand 0.8.5", "redis-protocol", "rustls 0.21.6", "rustls-native-certs", - "rustls-webpki 0.100.2", + "rustls-webpki", "semver 1.0.18", "sha-1", "tokio", @@ -3156,15 +3141,6 @@ dependencies = [ "libm", ] -[[package]] -name = "humantime" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -dependencies = [ - "quick-error", -] - [[package]] name = "humantime" version = "2.1.0" @@ -3177,7 +3153,7 @@ version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57a3db5ea5923d99402c94e9feb261dc5ee9b4efa158b0315f788cf549cc200c" dependencies = [ - "humantime 2.1.0", + "humantime", "serde", ] @@ -3741,9 +3717,9 @@ checksum = "8c408dc227d302f1496c84d9dc68c00fec6f56f9228a18f3023f976f3ca7c945" [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "f478948fd84d9f8e86967bf432640e46adfb5a4bd4f14ef7e864ab38220534ae" [[package]] name = "memoffset" @@ -4661,16 +4637,6 @@ dependencies = [ "termtree", ] -[[package]] -name = "pretty_env_logger" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d" -dependencies = [ - "env_logger 0.7.1", - "log", -] - [[package]] name = "prettyplease" version = "0.1.25" @@ -4843,12 +4809,6 @@ version = "2.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94" -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - [[package]] name = "quote" version = "1.0.33" @@ -5016,14 +4976,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.3" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.6", - "regex-syntax 0.7.4", + "regex-automata 0.3.7", + "regex-syntax 0.7.5", ] [[package]] @@ -5037,13 +4997,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" +checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.7.4", + "regex-syntax 0.7.5", ] [[package]] @@ -5054,9 +5014,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "reqwest" @@ -5261,7 +5221,7 @@ version = "0.0.0" dependencies = [ "apollo-parser 0.6.1", "apollo-smith", - "env_logger 0.10.0", + "env_logger", "libfuzzer-sys", "log", "reqwest", @@ -5412,7 +5372,7 @@ checksum = "1d1feddffcfcc0b33f5c6ce9a29e341e4cd59c3f78e7ee45f4a40c038b1d6cbb" dependencies = [ "log", "ring", - "rustls-webpki 0.101.4", + "rustls-webpki", "sct", ] @@ -5437,16 +5397,6 @@ dependencies = [ "base64 0.21.2", ] -[[package]] -name = "rustls-webpki" -version = "0.100.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e98ff011474fa39949b7e5c0428f9b4937eda7da7848bbb947786b7be0b27dab" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "rustls-webpki" version = "0.101.4" @@ -5617,9 +5567,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.185" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be9b6f69f1dfd54c3b568ffa45c310d6973a5e5148fd40cf515acaf38cf5bc31" +checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" dependencies = [ "serde_derive", ] @@ -5635,9 +5585,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.185" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc59dfdcbad1437773485e0367fea4b090a2e0a16d9ffc46af47764536a298ec" +checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", @@ -7061,9 +7011,9 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.4.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" dependencies = [ "form_urlencoded", "idna", diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index a0a36373ed..7f8f650f0c 100644 --- 
a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -8,7 +8,7 @@ publish = false [dependencies] anyhow = "1.0.75" -clap = { version = "4.3.23", features = ["derive"] } +clap = { version = "4.4.1", features = ["derive"] } cargo-scaffold = { version = "0.8.10", default-features = false } regex = "1" str_inflector = "0.12.0" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 79833c6fdc..1b22a3c7a8 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -69,7 +69,7 @@ axum = { version = "0.6.20", features = ["headers", "json", "original-uri"] } base64 = "0.21.2" buildstructor = "0.5.3" bytes = "1.4.0" -clap = { version = "4.3.23", default-features = false, features = [ +clap = { version = "4.4.1", default-features = false, features = [ "env", "derive", "std", @@ -77,7 +77,7 @@ clap = { version = "4.3.23", default-features = false, features = [ ] } console-subscriber = { version = "0.1.10", optional = true } ci_info = { version = "0.14.11", features = ["serde-1"] } -dashmap = { version = "5.5.0", features = ["serde"] } +dashmap = { version = "5.5.3", features = ["serde"] } derivative = "2.2.0" derive_more = { version = "0.99.17", default-features = false, features = [ "from", @@ -88,7 +88,7 @@ diff = "0.1.13" directories = "5.0.1" displaydoc = "0.2" flate2 = "1.0.27" -fred = { version = "6.3.0", features = ["enable-rustls", "no-client-setname"] } +fred = { version = "6.3.1", features = ["enable-rustls", "no-client-setname"] } futures = { version = "0.3.28", features = ["thread-pool"] } graphql_client = "0.13.0" hex = "0.4.3" @@ -163,7 +163,7 @@ prost-types = "0.11.9" proteus = "0.5.0" rand = "0.8.5" rhai = { version = "1.15.1", features = ["sync", "serde", "internals"] } -regex = "1.9.3" +regex = "1.9.4" reqwest = { version = "0.11.19", default-features = false, features = [ "rustls-tls", "rustls-native-certs", @@ -178,7 +178,7 @@ rustls-pemfile = "1.0.3" schemars = { version = "0.8.12", features = ["url"] } shellexpand = "3.1.0" sha2 = "0.10.7" -serde = { version = "1.0.183", features = ["derive", "rc"] } +serde = { version = "1.0.188", features = ["derive", "rc"] } serde_json_bytes = { version = "0.2.1", features = ["preserve_order"] } serde_json = { version = "1.0.105", features = [ "preserve_order", @@ -218,7 +218,7 @@ tracing-core = "0.1.31" tracing-futures = { version = "0.2.5", features = ["futures-03"] } tracing-opentelemetry = "0.19.0" tracing-subscriber = { version = "0.3.17", features = ["env-filter", "json"] } -url = { version = "2.4.0", features = ["serde"] } +url = { version = "2.4.1", features = ["serde"] } urlencoding = "2.1.3" uuid = { version = "1.4.1", features = ["serde", "v4"] } yaml-rust = "0.4.5" @@ -231,7 +231,7 @@ tokio-rustls = "0.24.1" http-serde = "1.1.3" hmac = "0.12.1" parking_lot = "0.12.1" -memchr = "2.5.0" +memchr = "2.6.1" brotli = "3.3.4" zstd = "0.12.4" zstd-safe = "6.0.6" @@ -260,12 +260,12 @@ axum = { version = "0.6.20", features = [ "ws", ] } ecdsa = { version = "0.15.1", features = ["signing", "pem", "pkcs8"] } -fred = { version = "6.3.0", features = ["enable-rustls", "no-client-setname"] } +fred = { version = "6.3.1", features = ["enable-rustls", "no-client-setname"] } futures-test = "0.3.28" insta = { version = "1.31.0", features = ["json", "redactions", "yaml"] } introspector-gadget = "0.2.2" maplit = "1.0.2" -memchr = { version = "2.5.0", default-features = false } +memchr = { version = "2.6.1", default-features = false } mockall = "0.11.4" once_cell = "1.18.0" p256 = "0.12.0" diff --git 
a/dockerfiles/tracing/datadog-subgraph/package-lock.json b/dockerfiles/tracing/datadog-subgraph/package-lock.json index c4d6227c35..e1c0feeff9 100644 --- a/dockerfiles/tracing/datadog-subgraph/package-lock.json +++ b/dockerfiles/tracing/datadog-subgraph/package-lock.json @@ -16,7 +16,7 @@ "graphql": "^16.5.0" }, "devDependencies": { - "typescript": "5.1.6" + "typescript": "5.2.2" } }, "node_modules/@apollo/cache-control-types": { @@ -69,9 +69,9 @@ } }, "node_modules/@apollo/server": { - "version": "4.9.1", - "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.1.tgz", - "integrity": "sha512-uUzkHt7DU/NEdwMvkb4GZq8ho2EYJAJXTiBq0HUhhjOuxMVfZ7fbKgOIcSF33Ur7c67fLdWwulXMAvv89Cyv0w==", + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.2.tgz", + "integrity": "sha512-DXARzsL7gvBfhUL2gTCpGduaH5wQFZi72/6ZOalpzT9InepIz0wL9TffSNVuaYla5u6JB9kX8WtnVUKf7IuHTA==", "dependencies": { "@apollo/cache-control-types": "^1.0.3", "@apollo/server-gateway-interface": "^1.1.1", @@ -390,9 +390,9 @@ } }, "node_modules/@datadog/pprof": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/@datadog/pprof/-/pprof-3.1.0.tgz", - "integrity": "sha512-Bg8O8yrHeL2KKHXhLoAAT33ZfzLnZ6rWfOjy8PkcNhUJy3UwNVLbUoApf+99EyLjqpzpk/kZXrIAMBzMMB8ilg==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@datadog/pprof/-/pprof-3.2.0.tgz", + "integrity": "sha512-kOhWHCWB80djnMCr5KNKBAy1Ih/jK/PIj6yqnZwL1Wqni/h6IBPRUMhtIxcYJMRgsZVYrFXUV20AVXTZCzFokw==", "hasInstallScript": true, "dependencies": { "delay": "^5.0.0", @@ -859,16 +859,16 @@ "integrity": "sha512-/RC5F4l1SCqD/jazwUF6+t34Cd8zTSAGZ7rvvZu1whZUhD2a5MOGKjSGowoGcpj/cbVZk1ZODIooJEQQq3nNAA==" }, "node_modules/dd-trace": { - "version": "4.12.0", - "resolved": "https://registry.npmjs.org/dd-trace/-/dd-trace-4.12.0.tgz", - "integrity": "sha512-QAvlKWUn8Tx75hbos7d7hk/9iCo3tKShd3vGg4mMtl3YWYZd7wsYUtX1PJUS+CjQgOPh2XJuBLAGYoamYYTeBg==", + "version": "4.13.1", + "resolved": "https://registry.npmjs.org/dd-trace/-/dd-trace-4.13.1.tgz", + "integrity": "sha512-2MGnMhA4bDnkv0B89rrv59ur1YAPL7CXzQ3tuvJqu8p/PlWXi2e11DXfNkEWXGB3JJXJxvqNsezmOleu48UEkg==", "hasInstallScript": true, "dependencies": { "@datadog/native-appsec": "^3.2.0", "@datadog/native-iast-rewriter": "2.0.1", "@datadog/native-iast-taint-tracking": "1.5.0", "@datadog/native-metrics": "^2.0.0", - "@datadog/pprof": "3.1.0", + "@datadog/pprof": "3.2.0", "@datadog/sketches-js": "^2.1.0", "@opentelemetry/api": "^1.0.0", "@opentelemetry/core": "^1.14.0", @@ -1774,9 +1774,9 @@ } }, "node_modules/typescript": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.1.6.tgz", - "integrity": "sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA==", + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", + "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", "dev": true, "bin": { "tsc": "bin/tsc", diff --git a/dockerfiles/tracing/datadog-subgraph/package.json b/dockerfiles/tracing/datadog-subgraph/package.json index 62e26bdb33..8cebb46890 100644 --- a/dockerfiles/tracing/datadog-subgraph/package.json +++ b/dockerfiles/tracing/datadog-subgraph/package.json @@ -18,6 +18,6 @@ "graphql": "^16.5.0" }, "devDependencies": { - "typescript": "5.1.6" + "typescript": "5.2.2" } } diff --git a/dockerfiles/tracing/jaeger-subgraph/package-lock.json b/dockerfiles/tracing/jaeger-subgraph/package-lock.json index 
9f8799c884..ac867e6837 100644 --- a/dockerfiles/tracing/jaeger-subgraph/package-lock.json +++ b/dockerfiles/tracing/jaeger-subgraph/package-lock.json @@ -18,7 +18,7 @@ "opentracing": "^0.14.7" }, "devDependencies": { - "typescript": "5.1.6" + "typescript": "5.2.2" } }, "node_modules/@apollo/cache-control-types": { @@ -71,9 +71,9 @@ } }, "node_modules/@apollo/server": { - "version": "4.9.1", - "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.1.tgz", - "integrity": "sha512-uUzkHt7DU/NEdwMvkb4GZq8ho2EYJAJXTiBq0HUhhjOuxMVfZ7fbKgOIcSF33Ur7c67fLdWwulXMAvv89Cyv0w==", + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.2.tgz", + "integrity": "sha512-DXARzsL7gvBfhUL2gTCpGduaH5wQFZi72/6ZOalpzT9InepIz0wL9TffSNVuaYla5u6JB9kX8WtnVUKf7IuHTA==", "dependencies": { "@apollo/cache-control-types": "^1.0.3", "@apollo/server-gateway-interface": "^1.1.1", @@ -163,44 +163,6 @@ "node": ">=14" } }, - "node_modules/@apollo/server/node_modules/@graphql-tools/merge": { - "version": "8.4.2", - "resolved": "https://registry.npmjs.org/@graphql-tools/merge/-/merge-8.4.2.tgz", - "integrity": "sha512-XbrHAaj8yDuINph+sAfuq3QCZ/tKblrTLOpirK0+CAgNlZUCHs0Fa+xtMUURgwCVThLle1AF7svJCxFizygLsw==", - "dependencies": { - "@graphql-tools/utils": "^9.2.1", - "tslib": "^2.4.0" - }, - "peerDependencies": { - "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" - } - }, - "node_modules/@apollo/server/node_modules/@graphql-tools/schema": { - "version": "9.0.19", - "resolved": "https://registry.npmjs.org/@graphql-tools/schema/-/schema-9.0.19.tgz", - "integrity": "sha512-oBRPoNBtCkk0zbUsyP4GaIzCt8C0aCI4ycIRUL67KK5pOHljKLBBtGT+Jr6hkzA74C8Gco8bpZPe7aWFjiaK2w==", - "dependencies": { - "@graphql-tools/merge": "^8.4.1", - "@graphql-tools/utils": "^9.2.1", - "tslib": "^2.4.0", - "value-or-promise": "^1.0.12" - }, - "peerDependencies": { - "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" - } - }, - "node_modules/@apollo/server/node_modules/@graphql-tools/utils": { - "version": "9.2.1", - "resolved": "https://registry.npmjs.org/@graphql-tools/utils/-/utils-9.2.1.tgz", - "integrity": "sha512-WUw506Ql6xzmOORlriNrD6Ugx+HjVgYxt9KCXD9mHAak+eaXSwuGGPyE60hy9xaDEoXKBsG7SkG69ybitaVl6A==", - "dependencies": { - "@graphql-typed-document-node/core": "^3.1.1", - "tslib": "^2.4.0" - }, - "peerDependencies": { - "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" - } - }, "node_modules/@apollo/server/node_modules/uuid": { "version": "9.0.0", "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.0.tgz", @@ -209,14 +171,6 @@ "uuid": "dist/bin/uuid" } }, - "node_modules/@apollo/server/node_modules/value-or-promise": { - "version": "1.0.12", - "resolved": "https://registry.npmjs.org/value-or-promise/-/value-or-promise-1.0.12.tgz", - "integrity": "sha512-Z6Uz+TYwEqE7ZN50gwn+1LCVo9ZVrpxRPOhOLnncYkY1ZzOYtrX8Fwf/rFktZ8R5mJms6EZf5TqNOMeZmnPq9Q==", - "engines": { - "node": ">=12" - } - }, "node_modules/@apollo/subgraph": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/@apollo/subgraph/-/subgraph-0.6.1.tgz", @@ -390,6 +344,44 @@ "node": ">=14" } }, + "node_modules/@graphql-tools/merge": { + "version": "8.4.2", + "resolved": "https://registry.npmjs.org/@graphql-tools/merge/-/merge-8.4.2.tgz", + "integrity": "sha512-XbrHAaj8yDuINph+sAfuq3QCZ/tKblrTLOpirK0+CAgNlZUCHs0Fa+xtMUURgwCVThLle1AF7svJCxFizygLsw==", + "dependencies": { + "@graphql-tools/utils": "^9.2.1", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, + 
"node_modules/@graphql-tools/schema": { + "version": "9.0.19", + "resolved": "https://registry.npmjs.org/@graphql-tools/schema/-/schema-9.0.19.tgz", + "integrity": "sha512-oBRPoNBtCkk0zbUsyP4GaIzCt8C0aCI4ycIRUL67KK5pOHljKLBBtGT+Jr6hkzA74C8Gco8bpZPe7aWFjiaK2w==", + "dependencies": { + "@graphql-tools/merge": "^8.4.1", + "@graphql-tools/utils": "^9.2.1", + "tslib": "^2.4.0", + "value-or-promise": "^1.0.12" + }, + "peerDependencies": { + "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, + "node_modules/@graphql-tools/utils": { + "version": "9.2.1", + "resolved": "https://registry.npmjs.org/@graphql-tools/utils/-/utils-9.2.1.tgz", + "integrity": "sha512-WUw506Ql6xzmOORlriNrD6Ugx+HjVgYxt9KCXD9mHAak+eaXSwuGGPyE60hy9xaDEoXKBsG7SkG69ybitaVl6A==", + "dependencies": { + "@graphql-typed-document-node/core": "^3.1.1", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, "node_modules/@graphql-typed-document-node/core": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/@graphql-typed-document-node/core/-/core-3.2.0.tgz", @@ -1435,9 +1427,9 @@ } }, "node_modules/typescript": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.1.6.tgz", - "integrity": "sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA==", + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", + "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", "dev": true, "bin": { "tsc": "bin/tsc", @@ -1471,6 +1463,14 @@ "uuid": "dist/bin/uuid" } }, + "node_modules/value-or-promise": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/value-or-promise/-/value-or-promise-1.0.12.tgz", + "integrity": "sha512-Z6Uz+TYwEqE7ZN50gwn+1LCVo9ZVrpxRPOhOLnncYkY1ZzOYtrX8Fwf/rFktZ8R5mJms6EZf5TqNOMeZmnPq9Q==", + "engines": { + "node": ">=12" + } + }, "node_modules/vary": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", @@ -1553,9 +1553,9 @@ } }, "@apollo/server": { - "version": "4.9.1", - "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.1.tgz", - "integrity": "sha512-uUzkHt7DU/NEdwMvkb4GZq8ho2EYJAJXTiBq0HUhhjOuxMVfZ7fbKgOIcSF33Ur7c67fLdWwulXMAvv89Cyv0w==", + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.2.tgz", + "integrity": "sha512-DXARzsL7gvBfhUL2gTCpGduaH5wQFZi72/6ZOalpzT9InepIz0wL9TffSNVuaYla5u6JB9kX8WtnVUKf7IuHTA==", "requires": { "@apollo/cache-control-types": "^1.0.3", "@apollo/server-gateway-interface": "^1.1.1", @@ -1599,44 +1599,10 @@ "resolved": "https://registry.npmjs.org/@apollo/utils.logger/-/utils.logger-2.0.1.tgz", "integrity": "sha512-YuplwLHaHf1oviidB7MxnCXAdHp3IqYV8n0momZ3JfLniae92eYqMIx+j5qJFX6WKJPs6q7bczmV4lXIsTu5Pg==" }, - "@graphql-tools/merge": { - "version": "8.4.2", - "resolved": "https://registry.npmjs.org/@graphql-tools/merge/-/merge-8.4.2.tgz", - "integrity": "sha512-XbrHAaj8yDuINph+sAfuq3QCZ/tKblrTLOpirK0+CAgNlZUCHs0Fa+xtMUURgwCVThLle1AF7svJCxFizygLsw==", - "requires": { - "@graphql-tools/utils": "^9.2.1", - "tslib": "^2.4.0" - } - }, - "@graphql-tools/schema": { - "version": "9.0.19", - "resolved": "https://registry.npmjs.org/@graphql-tools/schema/-/schema-9.0.19.tgz", - "integrity": "sha512-oBRPoNBtCkk0zbUsyP4GaIzCt8C0aCI4ycIRUL67KK5pOHljKLBBtGT+Jr6hkzA74C8Gco8bpZPe7aWFjiaK2w==", - "requires": { - "@graphql-tools/merge": "^8.4.1", - 
"@graphql-tools/utils": "^9.2.1", - "tslib": "^2.4.0", - "value-or-promise": "^1.0.12" - } - }, - "@graphql-tools/utils": { - "version": "9.2.1", - "resolved": "https://registry.npmjs.org/@graphql-tools/utils/-/utils-9.2.1.tgz", - "integrity": "sha512-WUw506Ql6xzmOORlriNrD6Ugx+HjVgYxt9KCXD9mHAak+eaXSwuGGPyE60hy9xaDEoXKBsG7SkG69ybitaVl6A==", - "requires": { - "@graphql-typed-document-node/core": "^3.1.1", - "tslib": "^2.4.0" - } - }, "uuid": { "version": "9.0.0", "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.0.tgz", "integrity": "sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg==" - }, - "value-or-promise": { - "version": "1.0.12", - "resolved": "https://registry.npmjs.org/value-or-promise/-/value-or-promise-1.0.12.tgz", - "integrity": "sha512-Z6Uz+TYwEqE7ZN50gwn+1LCVo9ZVrpxRPOhOLnncYkY1ZzOYtrX8Fwf/rFktZ8R5mJms6EZf5TqNOMeZmnPq9Q==" } } }, @@ -1787,6 +1753,35 @@ "resolved": "https://registry.npmjs.org/@apollo/utils.withrequired/-/utils.withrequired-2.0.1.tgz", "integrity": "sha512-YBDiuAX9i1lLc6GeTy1m7DGLFn/gMnvXqlalOIMjM7DeOgIacEjjfwPqb0M1CQ2v11HhR15d1NmxJoRCfrNqcA==" }, + "@graphql-tools/merge": { + "version": "8.4.2", + "resolved": "https://registry.npmjs.org/@graphql-tools/merge/-/merge-8.4.2.tgz", + "integrity": "sha512-XbrHAaj8yDuINph+sAfuq3QCZ/tKblrTLOpirK0+CAgNlZUCHs0Fa+xtMUURgwCVThLle1AF7svJCxFizygLsw==", + "requires": { + "@graphql-tools/utils": "^9.2.1", + "tslib": "^2.4.0" + } + }, + "@graphql-tools/schema": { + "version": "9.0.19", + "resolved": "https://registry.npmjs.org/@graphql-tools/schema/-/schema-9.0.19.tgz", + "integrity": "sha512-oBRPoNBtCkk0zbUsyP4GaIzCt8C0aCI4ycIRUL67KK5pOHljKLBBtGT+Jr6hkzA74C8Gco8bpZPe7aWFjiaK2w==", + "requires": { + "@graphql-tools/merge": "^8.4.1", + "@graphql-tools/utils": "^9.2.1", + "tslib": "^2.4.0", + "value-or-promise": "^1.0.12" + } + }, + "@graphql-tools/utils": { + "version": "9.2.1", + "resolved": "https://registry.npmjs.org/@graphql-tools/utils/-/utils-9.2.1.tgz", + "integrity": "sha512-WUw506Ql6xzmOORlriNrD6Ugx+HjVgYxt9KCXD9mHAak+eaXSwuGGPyE60hy9xaDEoXKBsG7SkG69ybitaVl6A==", + "requires": { + "@graphql-typed-document-node/core": "^3.1.1", + "tslib": "^2.4.0" + } + }, "@graphql-typed-document-node/core": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/@graphql-typed-document-node/core/-/core-3.2.0.tgz", @@ -2591,9 +2586,9 @@ } }, "typescript": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.1.6.tgz", - "integrity": "sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA==", + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", + "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", "dev": true }, "unpipe": { @@ -2611,6 +2606,11 @@ "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==" }, + "value-or-promise": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/value-or-promise/-/value-or-promise-1.0.12.tgz", + "integrity": "sha512-Z6Uz+TYwEqE7ZN50gwn+1LCVo9ZVrpxRPOhOLnncYkY1ZzOYtrX8Fwf/rFktZ8R5mJms6EZf5TqNOMeZmnPq9Q==" + }, "vary": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", diff --git a/dockerfiles/tracing/jaeger-subgraph/package.json b/dockerfiles/tracing/jaeger-subgraph/package.json index 
f4a7ece6c1..629c49b2ad 100644 --- a/dockerfiles/tracing/jaeger-subgraph/package.json +++ b/dockerfiles/tracing/jaeger-subgraph/package.json @@ -19,6 +19,6 @@ "opentracing": "^0.14.7" }, "devDependencies": { - "typescript": "5.1.6" + "typescript": "5.2.2" } } diff --git a/dockerfiles/tracing/zipkin-subgraph/package-lock.json b/dockerfiles/tracing/zipkin-subgraph/package-lock.json index 64f262076b..7e9bc9ffa9 100644 --- a/dockerfiles/tracing/zipkin-subgraph/package-lock.json +++ b/dockerfiles/tracing/zipkin-subgraph/package-lock.json @@ -19,7 +19,7 @@ "zipkin-javascript-opentracing": "^3.0.0" }, "devDependencies": { - "typescript": "5.1.6" + "typescript": "5.2.2" } }, "node_modules/@apollo/cache-control-types": { @@ -72,9 +72,9 @@ } }, "node_modules/@apollo/server": { - "version": "4.9.1", - "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.1.tgz", - "integrity": "sha512-uUzkHt7DU/NEdwMvkb4GZq8ho2EYJAJXTiBq0HUhhjOuxMVfZ7fbKgOIcSF33Ur7c67fLdWwulXMAvv89Cyv0w==", + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.2.tgz", + "integrity": "sha512-DXARzsL7gvBfhUL2gTCpGduaH5wQFZi72/6ZOalpzT9InepIz0wL9TffSNVuaYla5u6JB9kX8WtnVUKf7IuHTA==", "dependencies": { "@apollo/cache-control-types": "^1.0.3", "@apollo/server-gateway-interface": "^1.1.1", @@ -164,44 +164,6 @@ "node": ">=14" } }, - "node_modules/@apollo/server/node_modules/@graphql-tools/merge": { - "version": "8.4.2", - "resolved": "https://registry.npmjs.org/@graphql-tools/merge/-/merge-8.4.2.tgz", - "integrity": "sha512-XbrHAaj8yDuINph+sAfuq3QCZ/tKblrTLOpirK0+CAgNlZUCHs0Fa+xtMUURgwCVThLle1AF7svJCxFizygLsw==", - "dependencies": { - "@graphql-tools/utils": "^9.2.1", - "tslib": "^2.4.0" - }, - "peerDependencies": { - "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" - } - }, - "node_modules/@apollo/server/node_modules/@graphql-tools/schema": { - "version": "9.0.19", - "resolved": "https://registry.npmjs.org/@graphql-tools/schema/-/schema-9.0.19.tgz", - "integrity": "sha512-oBRPoNBtCkk0zbUsyP4GaIzCt8C0aCI4ycIRUL67KK5pOHljKLBBtGT+Jr6hkzA74C8Gco8bpZPe7aWFjiaK2w==", - "dependencies": { - "@graphql-tools/merge": "^8.4.1", - "@graphql-tools/utils": "^9.2.1", - "tslib": "^2.4.0", - "value-or-promise": "^1.0.12" - }, - "peerDependencies": { - "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" - } - }, - "node_modules/@apollo/server/node_modules/@graphql-tools/utils": { - "version": "9.2.1", - "resolved": "https://registry.npmjs.org/@graphql-tools/utils/-/utils-9.2.1.tgz", - "integrity": "sha512-WUw506Ql6xzmOORlriNrD6Ugx+HjVgYxt9KCXD9mHAak+eaXSwuGGPyE60hy9xaDEoXKBsG7SkG69ybitaVl6A==", - "dependencies": { - "@graphql-typed-document-node/core": "^3.1.1", - "tslib": "^2.4.0" - }, - "peerDependencies": { - "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" - } - }, "node_modules/@apollo/server/node_modules/uuid": { "version": "9.0.0", "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.0.tgz", @@ -210,14 +172,6 @@ "uuid": "dist/bin/uuid" } }, - "node_modules/@apollo/server/node_modules/value-or-promise": { - "version": "1.0.12", - "resolved": "https://registry.npmjs.org/value-or-promise/-/value-or-promise-1.0.12.tgz", - "integrity": "sha512-Z6Uz+TYwEqE7ZN50gwn+1LCVo9ZVrpxRPOhOLnncYkY1ZzOYtrX8Fwf/rFktZ8R5mJms6EZf5TqNOMeZmnPq9Q==", - "engines": { - "node": ">=12" - } - }, "node_modules/@apollo/subgraph": { "version": "0.6.1", "resolved": "https://registry.npmjs.org/@apollo/subgraph/-/subgraph-0.6.1.tgz", @@ -391,6 +345,44 @@ "node": ">=14" } }, + "node_modules/@graphql-tools/merge": { + 
"version": "8.4.2", + "resolved": "https://registry.npmjs.org/@graphql-tools/merge/-/merge-8.4.2.tgz", + "integrity": "sha512-XbrHAaj8yDuINph+sAfuq3QCZ/tKblrTLOpirK0+CAgNlZUCHs0Fa+xtMUURgwCVThLle1AF7svJCxFizygLsw==", + "dependencies": { + "@graphql-tools/utils": "^9.2.1", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, + "node_modules/@graphql-tools/schema": { + "version": "9.0.19", + "resolved": "https://registry.npmjs.org/@graphql-tools/schema/-/schema-9.0.19.tgz", + "integrity": "sha512-oBRPoNBtCkk0zbUsyP4GaIzCt8C0aCI4ycIRUL67KK5pOHljKLBBtGT+Jr6hkzA74C8Gco8bpZPe7aWFjiaK2w==", + "dependencies": { + "@graphql-tools/merge": "^8.4.1", + "@graphql-tools/utils": "^9.2.1", + "tslib": "^2.4.0", + "value-or-promise": "^1.0.12" + }, + "peerDependencies": { + "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, + "node_modules/@graphql-tools/utils": { + "version": "9.2.1", + "resolved": "https://registry.npmjs.org/@graphql-tools/utils/-/utils-9.2.1.tgz", + "integrity": "sha512-WUw506Ql6xzmOORlriNrD6Ugx+HjVgYxt9KCXD9mHAak+eaXSwuGGPyE60hy9xaDEoXKBsG7SkG69ybitaVl6A==", + "dependencies": { + "@graphql-typed-document-node/core": "^3.1.1", + "tslib": "^2.4.0" + }, + "peerDependencies": { + "graphql": "^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0" + } + }, "node_modules/@graphql-typed-document-node/core": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/@graphql-typed-document-node/core/-/core-3.2.0.tgz", @@ -1462,9 +1454,9 @@ } }, "node_modules/typescript": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.1.6.tgz", - "integrity": "sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA==", + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", + "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", "dev": true, "bin": { "tsc": "bin/tsc", @@ -1498,6 +1490,14 @@ "uuid": "dist/bin/uuid" } }, + "node_modules/value-or-promise": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/value-or-promise/-/value-or-promise-1.0.12.tgz", + "integrity": "sha512-Z6Uz+TYwEqE7ZN50gwn+1LCVo9ZVrpxRPOhOLnncYkY1ZzOYtrX8Fwf/rFktZ8R5mJms6EZf5TqNOMeZmnPq9Q==", + "engines": { + "node": ">=12" + } + }, "node_modules/vary": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", @@ -1612,9 +1612,9 @@ } }, "@apollo/server": { - "version": "4.9.1", - "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.1.tgz", - "integrity": "sha512-uUzkHt7DU/NEdwMvkb4GZq8ho2EYJAJXTiBq0HUhhjOuxMVfZ7fbKgOIcSF33Ur7c67fLdWwulXMAvv89Cyv0w==", + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.2.tgz", + "integrity": "sha512-DXARzsL7gvBfhUL2gTCpGduaH5wQFZi72/6ZOalpzT9InepIz0wL9TffSNVuaYla5u6JB9kX8WtnVUKf7IuHTA==", "requires": { "@apollo/cache-control-types": "^1.0.3", "@apollo/server-gateway-interface": "^1.1.1", @@ -1658,44 +1658,10 @@ "resolved": "https://registry.npmjs.org/@apollo/utils.logger/-/utils.logger-2.0.1.tgz", "integrity": "sha512-YuplwLHaHf1oviidB7MxnCXAdHp3IqYV8n0momZ3JfLniae92eYqMIx+j5qJFX6WKJPs6q7bczmV4lXIsTu5Pg==" }, - "@graphql-tools/merge": { - "version": "8.4.2", - "resolved": "https://registry.npmjs.org/@graphql-tools/merge/-/merge-8.4.2.tgz", - "integrity": "sha512-XbrHAaj8yDuINph+sAfuq3QCZ/tKblrTLOpirK0+CAgNlZUCHs0Fa+xtMUURgwCVThLle1AF7svJCxFizygLsw==", - "requires": { - 
"@graphql-tools/utils": "^9.2.1", - "tslib": "^2.4.0" - } - }, - "@graphql-tools/schema": { - "version": "9.0.19", - "resolved": "https://registry.npmjs.org/@graphql-tools/schema/-/schema-9.0.19.tgz", - "integrity": "sha512-oBRPoNBtCkk0zbUsyP4GaIzCt8C0aCI4ycIRUL67KK5pOHljKLBBtGT+Jr6hkzA74C8Gco8bpZPe7aWFjiaK2w==", - "requires": { - "@graphql-tools/merge": "^8.4.1", - "@graphql-tools/utils": "^9.2.1", - "tslib": "^2.4.0", - "value-or-promise": "^1.0.12" - } - }, - "@graphql-tools/utils": { - "version": "9.2.1", - "resolved": "https://registry.npmjs.org/@graphql-tools/utils/-/utils-9.2.1.tgz", - "integrity": "sha512-WUw506Ql6xzmOORlriNrD6Ugx+HjVgYxt9KCXD9mHAak+eaXSwuGGPyE60hy9xaDEoXKBsG7SkG69ybitaVl6A==", - "requires": { - "@graphql-typed-document-node/core": "^3.1.1", - "tslib": "^2.4.0" - } - }, "uuid": { "version": "9.0.0", "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.0.tgz", "integrity": "sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg==" - }, - "value-or-promise": { - "version": "1.0.12", - "resolved": "https://registry.npmjs.org/value-or-promise/-/value-or-promise-1.0.12.tgz", - "integrity": "sha512-Z6Uz+TYwEqE7ZN50gwn+1LCVo9ZVrpxRPOhOLnncYkY1ZzOYtrX8Fwf/rFktZ8R5mJms6EZf5TqNOMeZmnPq9Q==" } } }, @@ -1846,6 +1812,35 @@ "resolved": "https://registry.npmjs.org/@apollo/utils.withrequired/-/utils.withrequired-2.0.1.tgz", "integrity": "sha512-YBDiuAX9i1lLc6GeTy1m7DGLFn/gMnvXqlalOIMjM7DeOgIacEjjfwPqb0M1CQ2v11HhR15d1NmxJoRCfrNqcA==" }, + "@graphql-tools/merge": { + "version": "8.4.2", + "resolved": "https://registry.npmjs.org/@graphql-tools/merge/-/merge-8.4.2.tgz", + "integrity": "sha512-XbrHAaj8yDuINph+sAfuq3QCZ/tKblrTLOpirK0+CAgNlZUCHs0Fa+xtMUURgwCVThLle1AF7svJCxFizygLsw==", + "requires": { + "@graphql-tools/utils": "^9.2.1", + "tslib": "^2.4.0" + } + }, + "@graphql-tools/schema": { + "version": "9.0.19", + "resolved": "https://registry.npmjs.org/@graphql-tools/schema/-/schema-9.0.19.tgz", + "integrity": "sha512-oBRPoNBtCkk0zbUsyP4GaIzCt8C0aCI4ycIRUL67KK5pOHljKLBBtGT+Jr6hkzA74C8Gco8bpZPe7aWFjiaK2w==", + "requires": { + "@graphql-tools/merge": "^8.4.1", + "@graphql-tools/utils": "^9.2.1", + "tslib": "^2.4.0", + "value-or-promise": "^1.0.12" + } + }, + "@graphql-tools/utils": { + "version": "9.2.1", + "resolved": "https://registry.npmjs.org/@graphql-tools/utils/-/utils-9.2.1.tgz", + "integrity": "sha512-WUw506Ql6xzmOORlriNrD6Ugx+HjVgYxt9KCXD9mHAak+eaXSwuGGPyE60hy9xaDEoXKBsG7SkG69ybitaVl6A==", + "requires": { + "@graphql-typed-document-node/core": "^3.1.1", + "tslib": "^2.4.0" + } + }, "@graphql-typed-document-node/core": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/@graphql-typed-document-node/core/-/core-3.2.0.tgz", @@ -2662,9 +2657,9 @@ } }, "typescript": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.1.6.tgz", - "integrity": "sha512-zaWCozRZ6DLEWAWFrVDz1H6FVXzUSfTy5FUMWsQlU8Ym5JP9eO4xkTIROFCQvhQf61z6O/G6ugw3SgAnvvm+HA==", + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", + "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", "dev": true }, "unpipe": { @@ -2682,6 +2677,11 @@ "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz", "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==" }, + "value-or-promise": { + "version": "1.0.12", + "resolved": 
"https://registry.npmjs.org/value-or-promise/-/value-or-promise-1.0.12.tgz", + "integrity": "sha512-Z6Uz+TYwEqE7ZN50gwn+1LCVo9ZVrpxRPOhOLnncYkY1ZzOYtrX8Fwf/rFktZ8R5mJms6EZf5TqNOMeZmnPq9Q==" + }, "vary": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", diff --git a/dockerfiles/tracing/zipkin-subgraph/package.json b/dockerfiles/tracing/zipkin-subgraph/package.json index 613c4b8dd5..b27965516a 100644 --- a/dockerfiles/tracing/zipkin-subgraph/package.json +++ b/dockerfiles/tracing/zipkin-subgraph/package.json @@ -20,6 +20,6 @@ "zipkin-javascript-opentracing": "^3.0.0" }, "devDependencies": { - "typescript": "5.1.6" + "typescript": "5.2.2" } } diff --git a/xtask/Cargo.lock b/xtask/Cargo.lock index 31de05ed54..9e6fd2d1d3 100644 --- a/xtask/Cargo.lock +++ b/xtask/Cargo.lock @@ -43,16 +43,15 @@ dependencies = [ [[package]] name = "anstream" -version = "0.3.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" +checksum = "b1f58811cfac344940f1a400b6e6231ce35171f614f26439e80f8c1465c5cc0c" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", - "is-terminal", "utf8parse", ] @@ -82,9 +81,9 @@ dependencies = [ [[package]] name = "anstyle-wincon" -version = "1.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c" +checksum = "58f54d10c6dfa51283a066ceab3ec1ab78d13fae00aa49243a45e4571fb79dfd" dependencies = [ "anstyle", "windows-sys 0.48.0", @@ -229,9 +228,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.3.23" +version = "4.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03aef18ddf7d879c15ce20f04826ef8418101c7e528014c3eeea13321047dca3" +checksum = "7c8d502cbaec4595d2e7d5f61e318f05417bd2b66fdc3809498f0d3fdf0bea27" dependencies = [ "clap_builder", "clap_derive", @@ -240,9 +239,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.3.23" +version = "4.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ce6fffb678c9b80a70b6b6de0aad31df727623a70fd9a842c30cd573e2fa98" +checksum = "5891c7bc0edb3e1c2204fc5e94009affabeb1821c9e5fdc3959536c5c0bb984d" dependencies = [ "anstream", "anstyle", @@ -252,9 +251,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.3.12" +version = "4.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a9bb5758fc5dfe728d1019941681eccaf0cf8a4189b692a0ee2f2ecf90a050" +checksum = "c9fd1a5729c4548118d7d70ff234a44868d00489a4b6597b0b020918a0e91a1a" dependencies = [ "heck", "proc-macro2", @@ -780,17 +779,6 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" -[[package]] -name = "is-terminal" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" -dependencies = [ - "hermit-abi", - "rustix", - "windows-sys 0.48.0", -] - [[package]] name = "itertools" version = "0.11.0" @@ -1073,9 +1061,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.3" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bc1d4caf89fac26a70747fe603c130093b53c773888797a6329091246d651a" +checksum = 
"12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" dependencies = [ "aho-corasick", "memchr", @@ -1085,9 +1073,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fed1ceff11a1dddaee50c9dc8e4938bd106e9d89ae372f192311e7da498e3b69" +checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" dependencies = [ "aho-corasick", "memchr", @@ -1096,9 +1084,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ea92a5b6195c6ef2a0295ea818b312502c6fc94dde986c5553242e18fd4ce2" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "reqwest" @@ -1285,18 +1273,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.185" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be9b6f69f1dfd54c3b568ffa45c310d6973a5e5148fd40cf515acaf38cf5bc31" +checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.185" +version = "1.0.188" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc59dfdcbad1437773485e0367fea4b090a2e0a16d9ffc46af47764536a298ec" +checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" dependencies = [ "proc-macro2", "quote", diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index 7367b2d826..66f8db78aa 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -11,7 +11,7 @@ publish = false [dependencies] anyhow = "1" camino = "1" -clap = { version = "4.3.23", features = ["derive"] } +clap = { version = "4.4.1", features = ["derive"] } cargo_metadata = "0.17" # Only use the `clock` features of `chrono` to avoid the `time` dependency # impacted by CVE-2020-26235. 
https://github.com/chronotope/chrono/issues/602 @@ -26,13 +26,13 @@ libc = "0.2" memorable-wordlist = "0.1.7" nu-ansi-term = "0.49" once_cell = "1" -regex = "1.9.3" +regex = "1.9.4" reqwest = { version = "0.11", default-features = false, features = [ "blocking", "rustls-tls", "rustls-tls-native-roots", ] } -serde = { version = "1.0.185", features = ["derive"] } +serde = { version = "1.0.188", features = ["derive"] } serde_json = "1" tar = "0.4" tempfile = "3" From 028875a4986f1c8eee19ea0cddba007f3d73dfdb Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 30 Aug 2023 14:10:40 +0000 Subject: [PATCH 14/81] chore(deps): update rust crate memchr to 2.6.2 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 87b468f0df..ba1f3e9696 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3717,9 +3717,9 @@ checksum = "8c408dc227d302f1496c84d9dc68c00fec6f56f9228a18f3023f976f3ca7c945" [[package]] name = "memchr" -version = "2.6.1" +version = "2.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f478948fd84d9f8e86967bf432640e46adfb5a4bd4f14ef7e864ab38220534ae" +checksum = "5486aed0026218e61b8a01d5fbd5a0a134649abb71a0e53b7bc088529dced86e" [[package]] name = "memoffset" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 1b22a3c7a8..f120bff483 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -231,7 +231,7 @@ tokio-rustls = "0.24.1" http-serde = "1.1.3" hmac = "0.12.1" parking_lot = "0.12.1" -memchr = "2.6.1" +memchr = "2.6.2" brotli = "3.3.4" zstd = "0.12.4" zstd-safe = "6.0.6" @@ -265,7 +265,7 @@ futures-test = "0.3.28" insta = { version = "1.31.0", features = ["json", "redactions", "yaml"] } introspector-gadget = "0.2.2" maplit = "1.0.2" -memchr = { version = "2.6.1", default-features = false } +memchr = { version = "2.6.2", default-features = false } mockall = "0.11.4" once_cell = "1.18.0" p256 = "0.12.0" From 14e8a1f884646d72558bc1919ab6d296029df90b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 30 Aug 2023 22:39:19 +0000 Subject: [PATCH 15/81] fix(deps): update dependency @apollo/server to v4.9.3 [security] --- .../tracing/datadog-subgraph/package-lock.json | 6 +++--- .../tracing/jaeger-subgraph/package-lock.json | 12 ++++++------ .../tracing/zipkin-subgraph/package-lock.json | 12 ++++++------ 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/dockerfiles/tracing/datadog-subgraph/package-lock.json b/dockerfiles/tracing/datadog-subgraph/package-lock.json index e1c0feeff9..2b8a27a230 100644 --- a/dockerfiles/tracing/datadog-subgraph/package-lock.json +++ b/dockerfiles/tracing/datadog-subgraph/package-lock.json @@ -69,9 +69,9 @@ } }, "node_modules/@apollo/server": { - "version": "4.9.2", - "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.2.tgz", - "integrity": "sha512-DXARzsL7gvBfhUL2gTCpGduaH5wQFZi72/6ZOalpzT9InepIz0wL9TffSNVuaYla5u6JB9kX8WtnVUKf7IuHTA==", + "version": "4.9.3", + "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.3.tgz", + "integrity": "sha512-U56Sx/UmzR3Es344hQ/Ptf2EJrH+kV4ZPoLmgGjWoiwf2wYQ/pRSvkSXgjOvoyE34wSa8Gh7f92ljfLfY+6q1w==", "dependencies": { "@apollo/cache-control-types": "^1.0.3", "@apollo/server-gateway-interface": "^1.1.1", diff --git a/dockerfiles/tracing/jaeger-subgraph/package-lock.json b/dockerfiles/tracing/jaeger-subgraph/package-lock.json index 
ac867e6837..89c196fbb3 100644 --- a/dockerfiles/tracing/jaeger-subgraph/package-lock.json +++ b/dockerfiles/tracing/jaeger-subgraph/package-lock.json @@ -71,9 +71,9 @@ } }, "node_modules/@apollo/server": { - "version": "4.9.2", - "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.2.tgz", - "integrity": "sha512-DXARzsL7gvBfhUL2gTCpGduaH5wQFZi72/6ZOalpzT9InepIz0wL9TffSNVuaYla5u6JB9kX8WtnVUKf7IuHTA==", + "version": "4.9.3", + "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.3.tgz", + "integrity": "sha512-U56Sx/UmzR3Es344hQ/Ptf2EJrH+kV4ZPoLmgGjWoiwf2wYQ/pRSvkSXgjOvoyE34wSa8Gh7f92ljfLfY+6q1w==", "dependencies": { "@apollo/cache-control-types": "^1.0.3", "@apollo/server-gateway-interface": "^1.1.1", @@ -1553,9 +1553,9 @@ } }, "@apollo/server": { - "version": "4.9.2", - "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.2.tgz", - "integrity": "sha512-DXARzsL7gvBfhUL2gTCpGduaH5wQFZi72/6ZOalpzT9InepIz0wL9TffSNVuaYla5u6JB9kX8WtnVUKf7IuHTA==", + "version": "4.9.3", + "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.3.tgz", + "integrity": "sha512-U56Sx/UmzR3Es344hQ/Ptf2EJrH+kV4ZPoLmgGjWoiwf2wYQ/pRSvkSXgjOvoyE34wSa8Gh7f92ljfLfY+6q1w==", "requires": { "@apollo/cache-control-types": "^1.0.3", "@apollo/server-gateway-interface": "^1.1.1", diff --git a/dockerfiles/tracing/zipkin-subgraph/package-lock.json b/dockerfiles/tracing/zipkin-subgraph/package-lock.json index 7e9bc9ffa9..80a842008c 100644 --- a/dockerfiles/tracing/zipkin-subgraph/package-lock.json +++ b/dockerfiles/tracing/zipkin-subgraph/package-lock.json @@ -72,9 +72,9 @@ } }, "node_modules/@apollo/server": { - "version": "4.9.2", - "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.2.tgz", - "integrity": "sha512-DXARzsL7gvBfhUL2gTCpGduaH5wQFZi72/6ZOalpzT9InepIz0wL9TffSNVuaYla5u6JB9kX8WtnVUKf7IuHTA==", + "version": "4.9.3", + "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.3.tgz", + "integrity": "sha512-U56Sx/UmzR3Es344hQ/Ptf2EJrH+kV4ZPoLmgGjWoiwf2wYQ/pRSvkSXgjOvoyE34wSa8Gh7f92ljfLfY+6q1w==", "dependencies": { "@apollo/cache-control-types": "^1.0.3", "@apollo/server-gateway-interface": "^1.1.1", @@ -1612,9 +1612,9 @@ } }, "@apollo/server": { - "version": "4.9.2", - "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.2.tgz", - "integrity": "sha512-DXARzsL7gvBfhUL2gTCpGduaH5wQFZi72/6ZOalpzT9InepIz0wL9TffSNVuaYla5u6JB9kX8WtnVUKf7IuHTA==", + "version": "4.9.3", + "resolved": "https://registry.npmjs.org/@apollo/server/-/server-4.9.3.tgz", + "integrity": "sha512-U56Sx/UmzR3Es344hQ/Ptf2EJrH+kV4ZPoLmgGjWoiwf2wYQ/pRSvkSXgjOvoyE34wSa8Gh7f92ljfLfY+6q1w==", "requires": { "@apollo/cache-control-types": "^1.0.3", "@apollo/server-gateway-interface": "^1.1.1", From b215bbf81d6402e629bc5c105e5731eddbd5529b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 31 Aug 2023 00:07:36 +0000 Subject: [PATCH 16/81] fix(deps): update dependency dd-trace to v4.14.0 --- .../datadog-subgraph/package-lock.json | 23 +++++++++++++------ 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/dockerfiles/tracing/datadog-subgraph/package-lock.json b/dockerfiles/tracing/datadog-subgraph/package-lock.json index 2b8a27a230..da582feb1e 100644 --- a/dockerfiles/tracing/datadog-subgraph/package-lock.json +++ b/dockerfiles/tracing/datadog-subgraph/package-lock.json @@ -348,16 +348,25 @@ } }, "node_modules/@datadog/native-iast-rewriter": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/@datadog/native-iast-rewriter/-/native-iast-rewriter-2.0.1.tgz", - "integrity": "sha512-Mm+FG3XxEbPrAfJQPOMHts7iZZXRvg9gnGeeFRGkyirmRcQcOpZO4wFe/8K61DUVa5pXpgAJQ2ZkBGYF1O9STg==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@datadog/native-iast-rewriter/-/native-iast-rewriter-2.1.3.tgz", + "integrity": "sha512-4oxMFz5ZEpOK3pRc9KjquMgkRP6D+oPQVIzOk4dgG8fl2iepHtCa3gna/fQBfdWIiX5a2j65O3R1zNp2ckk8JA==", "dependencies": { + "lru-cache": "^7.14.0", "node-gyp-build": "^4.5.0" }, "engines": { "node": ">= 10" } }, + "node_modules/@datadog/native-iast-rewriter/node_modules/lru-cache": { + "version": "7.18.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "engines": { + "node": ">=12" + } + }, "node_modules/@datadog/native-iast-rewriter/node_modules/node-gyp-build": { "version": "4.6.0", "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.6.0.tgz", @@ -859,13 +868,13 @@ "integrity": "sha512-/RC5F4l1SCqD/jazwUF6+t34Cd8zTSAGZ7rvvZu1whZUhD2a5MOGKjSGowoGcpj/cbVZk1ZODIooJEQQq3nNAA==" }, "node_modules/dd-trace": { - "version": "4.13.1", - "resolved": "https://registry.npmjs.org/dd-trace/-/dd-trace-4.13.1.tgz", - "integrity": "sha512-2MGnMhA4bDnkv0B89rrv59ur1YAPL7CXzQ3tuvJqu8p/PlWXi2e11DXfNkEWXGB3JJXJxvqNsezmOleu48UEkg==", + "version": "4.14.0", + "resolved": "https://registry.npmjs.org/dd-trace/-/dd-trace-4.14.0.tgz", + "integrity": "sha512-hxYWynK44VWz5FjQTG9QmQwpb9WkgBQ1QMfan9VfXt04N1H9slvFBteh3rKOUwZk61mHmqorWC0uYz4ipcQUig==", "hasInstallScript": true, "dependencies": { "@datadog/native-appsec": "^3.2.0", - "@datadog/native-iast-rewriter": "2.0.1", + "@datadog/native-iast-rewriter": "2.1.3", "@datadog/native-iast-taint-tracking": "1.5.0", "@datadog/native-metrics": "^2.0.0", "@datadog/pprof": "3.2.0", From 4201304419b07ac2e05810191fcaeb1f2ff5bfec Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 31 Aug 2023 13:07:55 +0200 Subject: [PATCH 17/81] fix(deps): update rust crate router-bridge to v0.5.4+v2.5.3 (#3581) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [![Mend Renovate](https://app.renovatebot.com/images/banner.svg)](https://renovatebot.com) This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [router-bridge](https://www.apollographql.com/apollo-federation/) ([source](https://togithub.com/apollographql/federation)) | dependencies | patch | `=0.5.3+v2.5.3` -> `=0.5.4+v2.5.3` | --- ### Configuration 📅 **Schedule**: Branch creation - At any time (no schedule defined), Automerge - At any time (no schedule defined). 🚦 **Automerge**: Disabled by config. Please merge this manually once you are satisfied. ♻ **Rebasing**: Whenever PR becomes conflicted, or you tick the rebase/retry checkbox. 🔕 **Ignore**: Close this PR and you won't be reminded about this update again. --- - [ ] If you want to rebase/retry this PR, check this box --- This PR has been generated by [Mend Renovate](https://www.mend.io/free-developer-tools/renovate/). View repository job log [here](https://developer.mend.io/github/apollographql/router). 
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Co-authored-by: Geoffroy Couprie --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ba1f3e9696..ec07e8fddd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5192,9 +5192,9 @@ dependencies = [ [[package]] name = "router-bridge" -version = "0.5.3+v2.5.3" +version = "0.5.4+v2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45db3eb6961669a47e611e25da5aabf767d81d0396ad0fb2efb7694587a84b01" +checksum = "82d3e1bfc37d92eab53edcd17d4290b5aa8fb95ab43d0408f5d9b56157a6d61c" dependencies = [ "anyhow", "async-channel", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index f120bff483..51df5eb607 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -171,7 +171,7 @@ reqwest = { version = "0.11.19", default-features = false, features = [ "stream", ] } # note: this dependency should _always_ be pinned, prefix the version with an `=` -router-bridge = "=0.5.3+v2.5.3" +router-bridge = "=0.5.4+v2.5.3" rust-embed = "6.8.1" rustls = "0.21.6" rustls-pemfile = "1.0.3" From bda155e628bbbdb218c84c9394b472c72ca8f245 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 31 Aug 2023 22:19:15 +0000 Subject: [PATCH 18/81] fix(deps): update rust crate clap to 4.4.2 --- Cargo.lock | 19 +++++++++---------- apollo-router-scaffold/Cargo.toml | 2 +- apollo-router/Cargo.toml | 2 +- xtask/Cargo.lock | 13 ++++++------- xtask/Cargo.toml | 2 +- 5 files changed, 18 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec07e8fddd..1c570d7ec0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -295,7 +295,7 @@ dependencies = [ "buildstructor 0.5.3", "bytes", "ci_info", - "clap 4.4.1", + "clap 4.4.2", "console-subscriber", "dashmap", "derivative", @@ -439,7 +439,7 @@ version = "1.28.1" dependencies = [ "anyhow", "cargo-scaffold", - "clap 4.4.1", + "clap 4.4.2", "copy_dir", "regex", "str_inflector", @@ -1356,20 +1356,19 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.1" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c8d502cbaec4595d2e7d5f61e318f05417bd2b66fdc3809498f0d3fdf0bea27" +checksum = "6a13b88d2c62ff462f88e4a121f17a82c1af05693a2f192b5c38d14de73c19f6" dependencies = [ "clap_builder", "clap_derive", - "once_cell", ] [[package]] name = "clap_builder" -version = "4.4.1" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5891c7bc0edb3e1c2204fc5e94009affabeb1821c9e5fdc3959536c5c0bb984d" +checksum = "2bb9faaa7c2ef94b2743a21f5a29e6f0010dff4caa69ac8e9d6cf8b6fa74da08" dependencies = [ "anstream", "anstyle", @@ -1379,9 +1378,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.0" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9fd1a5729c4548118d7d70ff234a44868d00489a4b6597b0b020918a0e91a1a" +checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" dependencies = [ "heck 0.4.1", "proc-macro2", @@ -1677,7 +1676,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.4.1", + "clap 4.4.2", "criterion-plot", "futures", "is-terminal", diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index 7f8f650f0c..c738ea1f2f 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -8,7 +8,7 @@ 
publish = false [dependencies] anyhow = "1.0.75" -clap = { version = "4.4.1", features = ["derive"] } +clap = { version = "4.4.2", features = ["derive"] } cargo-scaffold = { version = "0.8.10", default-features = false } regex = "1" str_inflector = "0.12.0" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 51df5eb607..8b25903d3a 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -69,7 +69,7 @@ axum = { version = "0.6.20", features = ["headers", "json", "original-uri"] } base64 = "0.21.2" buildstructor = "0.5.3" bytes = "1.4.0" -clap = { version = "4.4.1", default-features = false, features = [ +clap = { version = "4.4.2", default-features = false, features = [ "env", "derive", "std", diff --git a/xtask/Cargo.lock b/xtask/Cargo.lock index 9e6fd2d1d3..d11f5724b0 100644 --- a/xtask/Cargo.lock +++ b/xtask/Cargo.lock @@ -228,20 +228,19 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.1" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c8d502cbaec4595d2e7d5f61e318f05417bd2b66fdc3809498f0d3fdf0bea27" +checksum = "6a13b88d2c62ff462f88e4a121f17a82c1af05693a2f192b5c38d14de73c19f6" dependencies = [ "clap_builder", "clap_derive", - "once_cell", ] [[package]] name = "clap_builder" -version = "4.4.1" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5891c7bc0edb3e1c2204fc5e94009affabeb1821c9e5fdc3959536c5c0bb984d" +checksum = "2bb9faaa7c2ef94b2743a21f5a29e6f0010dff4caa69ac8e9d6cf8b6fa74da08" dependencies = [ "anstream", "anstyle", @@ -251,9 +250,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.0" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9fd1a5729c4548118d7d70ff234a44868d00489a4b6597b0b020918a0e91a1a" +checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" dependencies = [ "heck", "proc-macro2", diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index 66f8db78aa..840f2a9283 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -11,7 +11,7 @@ publish = false [dependencies] anyhow = "1" camino = "1" -clap = { version = "4.4.1", features = ["derive"] } +clap = { version = "4.4.2", features = ["derive"] } cargo_metadata = "0.17" # Only use the `clock` features of `chrono` to avoid the `time` dependency # impacted by CVE-2020-26235. 
https://github.com/chronotope/chrono/issues/602 From 1472e8a64739e56ca0ec3143815f22131a4287bb Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Fri, 1 Sep 2023 16:07:52 +0200 Subject: [PATCH 19/81] Reintroduce authorization documentation (#3673) Co-authored-by: Maria Elisabeth Schreiber Co-authored-by: Edward Huang <18322228+shorgi@users.noreply.github.com> --- .../feat_geal_authorization_directives.md | 24 + .../maint_geal_authorization_analytics.md | 8 + docs/source/config.json | 7 + docs/source/configuration/authorization.mdx | 579 ++++++++++++++++++ docs/source/enterprise-features.mdx | 1 + 5 files changed, 619 insertions(+) create mode 100644 .changesets/feat_geal_authorization_directives.md create mode 100644 .changesets/maint_geal_authorization_analytics.md create mode 100644 docs/source/configuration/authorization.mdx diff --git a/.changesets/feat_geal_authorization_directives.md b/.changesets/feat_geal_authorization_directives.md new file mode 100644 index 0000000000..453076cdc9 --- /dev/null +++ b/.changesets/feat_geal_authorization_directives.md @@ -0,0 +1,24 @@ +### GraphOS Enterprise: authorization directives ([PR #3397](https://github.com/apollographql/router/pull/3397), [PR #3662](https://github.com/apollographql/router/pull/3662)) + +We introduce two new directives, `requiresScopes` and `@authenticated`, that define authorization policies for fields and types in the supergraph schema. + +They are defined as follows: + +```graphql +scalar federation__Scope +directive @requiresScopes(scopes: [[federation__Scope!]!]!) on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM + +directive @authenticated on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM +``` + +The implementation hooks into the request lifecycle at multiple steps: +- In query analysis, we extract the list of scopes necessary to authorize the query. +- In a supergraph plugin, we calculate the authorization status and put it in the request context: + - for `@requiresScopes`, this is the intersection of the query's required scopes and the scopes provided in the request token + - for `@authenticated`, it is `is_authenticated` or not +- In the query planning phase, we filter the query to remove unauthorized fields before proceeding with query planning. +- At the subgraph level, if query deduplication is active, the authorization status is used to group queries together. +- At the execution service level, the response is first formatted according to the filtered query, which removed any unauthorized information, then to the shape of the original query, which propagates nulls as needed. +- At the execution service level, errors are added to the response indicating which fields were removed because they were not authorized. 
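As a rough illustration of the intersection step above (this is a sketch, not the router's actual code; the `granted_scopes` helper and the sample scope values are invented for the example), the token's space-separated `scope` claim can be split into a set and intersected with the scopes extracted from the query:

```rust
use std::collections::HashSet;

/// Split an OAuth2-style, space-separated scope claim ("scope1 scope2 scope3")
/// into a set of individual scopes.
fn granted_scopes(scope_claim: &str) -> HashSet<&str> {
    scope_claim.split_whitespace().collect()
}

fn main() {
    // Hypothetical scopes extracted from the query during query analysis.
    let required: HashSet<&str> = ["read:users", "read:email"].into_iter().collect();

    // Hypothetical `scope` claim carried by the request's token.
    let granted = granted_scopes("read:users profile openid");

    // The authorization status is based on the intersection of both sets;
    // fields whose required scopes are not granted are filtered out before planning.
    let authorized: HashSet<&str> = required.intersection(&granted).copied().collect();
    println!("authorized scopes: {authorized:?}");
}
```

In the supergraph plugin, the resulting status is stored in the request context so that query planning and deduplication can reuse it, as described in the list above.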
+ +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3397 https://github.com/apollographql/router/pull/3662 \ No newline at end of file diff --git a/.changesets/maint_geal_authorization_analytics.md b/.changesets/maint_geal_authorization_analytics.md new file mode 100644 index 0000000000..50f235a639 --- /dev/null +++ b/.changesets/maint_geal_authorization_analytics.md @@ -0,0 +1,8 @@ +### Add a metric tracking authorization usage ([PR #3660](https://github.com/apollographql/router/pull/3660)) + +The new metric is a counter called `apollo.router.operations.authorization` and contains the following boolean attributes: +- `filtered`: the query has one or more filtered fields +- `requires_scopes`: the query uses fields or types tagged with the `@requiresScopes` directive +- `authenticated`: the query uses fields or types tagged with the `@authenticated` directive + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3660 \ No newline at end of file diff --git a/docs/source/config.json b/docs/source/config.json index 74a3cfbd31..28f2d33a7d 100644 --- a/docs/source/config.json +++ b/docs/source/config.json @@ -42,6 +42,13 @@ "enterprise" ] ], + "Authorization": [ + "/configuration/authorization", + [ + "enterprise", + "experimental" + ] + ], "Subgraph Authentication": "/configuration/authn-subgraph", "Operation limits": [ "/configuration/operation-limits", diff --git a/docs/source/configuration/authorization.mdx b/docs/source/configuration/authorization.mdx new file mode 100644 index 0000000000..1b6d1e47f8 --- /dev/null +++ b/docs/source/configuration/authorization.mdx @@ -0,0 +1,579 @@ +--- +title: Authorization in the Apollo Router +description: Strengthen service security with a centralized governance layer +minVersion: 1.29.0 +--- + + + + + +APIs provide access to business-critical data. Unrestricted access can result in data breaches, monetary losses, or potential denial of service. Even for internal services, checks can be essential to limit data to authorized parties. + +Services may have their own access controls, but enforcing authorization _in the Apollo Router_ is valuable for a few reasons: + +- **Optimal query execution**: Validating authorization _before_ processing requests enables the early termination of unauthorized requests. Stopping unauthorized requests at the edge of your graph reduces the load on your services and enhances performance. + + ```mermaid + flowchart LR; + clients(Client); + subgraph Router[" "] + router(["Apollo Router"]); + serviceB[Users
API]; + serviceC[Posts
API]; + end + router -.->|"❌ Subquery"| serviceB & serviceC; + clients -->|"⚠️Unauthorized
request"| router; + ``` + + - If every field in a particular subquery requires authorization, the router's [query planner](../customizations/overview#request-path) can _eliminate entire subgraph requests_ for unauthorized requests. For example, a request may have permission to view a particular user's posts on a social media platform but not have permission to view any of that user's personally identifiable information (PII). Check out [How it works](#how-it-works) to learn more. + + ```mermaid + flowchart LR; + clients(Client); + subgraph Router[" "] + router(["Apollo Router"]); + serviceB[Users
API]; + serviceC[Posts
API]; + end + router -->|"✅ Authorized
subquery"| serviceC; + router -.->|"❌ Unauthorized
subquery"| serviceB; + clients -->|"⚠️ Partially authorized
request"| router; + ``` + - Also, [query deduplication](./traffic-shaping/#query-deduplication) groups requested fields based on their required authorization. Entire groups can be eliminated from the query plan if they don't have the correct authorization. + +- **Declarative access rules**: You define access controls at the field level, and GraphOS [composes](#composition-and-federation) them across your services. These rules create graph-native governance without the need for an extra orchestration layer. + +- **Principled architecture**: Through composition, the router centralizes authorization logic while allowing for auditing at the service level. This centralized authorization is an initial checkpoint that other service layers can reinforce. + + ```mermaid + flowchart LR; + clients(Client); + Level2:::padding + subgraph Level1["
🔐 Router layer                                                   "] + router(["Apollo Router"]); + subgraph Level2["🔐 Service layer"] + serviceB[Users
API]; + serviceC[Posts
API]; + end + end + + router -->|"Subquery"| serviceB & serviceC; + clients -->|"Request"| router; + + classDef padding padding-left:1em, padding-right:1em + ``` + +## How access control works + +The Apollo Router provides access controls via **authorization directives** that define access to specific fields and types across your supergraph: + +- The [`@requiresScopes`](#requiresscopes) directive allows granular access control through the scopes you define. +- The [`@authenticated`](#authenticated) directive allows access to the annotated field or type for _authenticated requests only_. + +For example, imagine you're building a social media platform that includes a `Users` subgraph. You can use the [`@requiresScopes`](#requiresscopes) directive to declare that viewing other users' information requires the `read:user` scope: + +```graphql +type Query { + users: [User!]! @requiresScopes(scopes: [["read:users"]]) +} +``` + +You can use the [`@authenticated`](#authenticated) directive to declare that users must be logged in to update their own information: + +```graphql +type Mutation { + updateUser(input: UpdateUserInput!): User! @authenticated +} +``` + +You can define both directives—together or separately—at the field level to fine-tune your access controls. +GraphOS [composes](#composition-and-federation) restrictions into the supergraph schema so that each subgraph's restrictions are respected. +The router then enforces these directives on all incoming requests. + +## Prerequisites + +> ⚠️ Only the Apollo Router supports authorization directives—[`@apollo/gateway`](/federation/v1/gateway/) does _not_. Check out the [migration guide](../migrating-from-gateway/) if you'd like to use them. + +Before using the authorization directives in your subgraph schemas, you must: +- Validate that your Apollo Router uses version `1.29.0` or later and is [connected to your GraphOS Enterprise organization](../enterprise-features/#enabling-enterprise-features) +- Include **[claims](#configure-request-claims)** in requests made to the router + +### Configure request claims + +Claims are the individual details of a request's authentication and scope. They might include details like the ID of the user making the request and any authorization scopes—for example, `read:profiles`— assigned to that user. The authorization directives use a request's claims to evaluate which fields and types are authorized. + +To provide the router with the claims it needs, you must either configure JSON Web Token (JWT) authentication or add an external coprocessor that adds claims to a request's context. In some cases (explained below), you may require both. + +- **JWT authentication configuration**: If you configure [JWT authentication](./authn-jwt), the Apollo Router [automatically adds a JWT token's claims](./authn-jwt#working-with-jwt-claims) to the request's context at the `apollo_authentication::JWT::claims` key. +- **Adding claims via coprocessor**: If you can't use JWT authentication, you can [add claims with a coprocessor](/customizations/coprocessor#adding-authorization-claims-via-coprocessor). Coprocessors let you hook into the Apollo Router's request-handling lifecycle with custom code. +- **Augmenting JWT claims via coprocessor**: Your authorization policies may require information beyond what your JSON web tokens provide. For example, a token's claims may include user IDs, which you then use to look up user roles. 
For situations like this, you can [augment the claims](./authn-jwt#claim-augmentation-via-coprocessors) from your JSON web tokens with coprocessors. + +## Authorization directives + +**Authorization directives are enabled in your router by default**. If you want to _disable_ them, include the following in your router's [YAML config file](./overview/): + +```yaml title="router.yaml" +authorization: + preview_directives: + enabled: false +``` + +### `@requiresScopes` + +The `@requiresScopes` directive marks fields and types as restricted based on required scopes. +The directive includes a `scopes` argument with an array of the required scopes to declare which scopes are required: + +```graphql +@requiresScopes(scopes: [["scope1", "scope2", "scope3"]]) +``` + +Depending on the scopes present on the request, the router filters out unauthorized fields and types. + +> You can use Boolean logic to define the required scopes. See [Combining required scopes](#combining-required-scopes-with-andor-logic) for details. + +The directive validates the required scopes by loading the claims object at the `apollo_authentication::JWT::claims` key in a request's context. +The claims object's `scope` key's value should be a space-separated string of scopes in the format defined by the [OAuth2 RFC for access token scopes](https://datatracker.ietf.org/doc/html/rfc6749#section-3.3). + +```rhai +claims = context["apollo_authentication::JWT::claims"] +claims["scope"] = "scope1 scope2 scope3" +``` + +#### Usage + +To use the `@requiresScopes` directive in a subgraph, you can [import it from the `@link` directive](/federation/federated-types/federated-directives/#importing-directives) like so: + +```graphql +extend schema + @link( + url: "https://specs.apollo.dev/federation/v2.5", + import: [..., "@requiresScopes"]) +``` + +#### Combining required scopes with `AND`/`OR` logic + +A request must include _all_ elements in the inner-level `scopes` array to resolve the associated field or type. In other words, the authorization validation uses **AND** logic between the elements in the inner-level `scopes` array. + +```graphql +@requiresScopes(scopes: [["scope1", "scope2", "scope3"]]) +``` + +For the preceding example, a request would need `scope1` **AND** `scope2` **AND** `scope3` to be authorized. + +You can use nested arrays to introduce **OR** logic: + +```graphql +@requiresScopes(scopes: [["scope1"], ["scope2"], ["scope3"]]) +``` + +For the preceding example, a request would need `scope1` **OR** `scope2` **OR** `scope3` to be authorized. + +You can nest arrays and elements as needed to achieve your desired logic. For example: + +```graphql +@requiresScopes(scopes: [["scope1", "scope2"], ["scope3"]]) +``` + +This syntax requires requests to have either (`scope1` **AND** `scope2`) **OR** just `scope3` to be authorized. + + +#### Example `@requiresScopes` use case + +Imagine the social media platform you're building lets users view other users' information only if they have the required permissions. +Your schema may look something like this: + +```graphql +type Query { + user(id: ID!): User @requiresScopes(scopes: [["read:others"]]) + users: [User!]! @requiresScopes(scopes: [["read:others"]]) + post(id: ID!): Post +} + +type User { + id: ID! + username: String + email: String @requiresScopes(scopes: [["read:email"]]) + profileImage: String + posts: [Post!]! +} + +type Post { + id: ID! + author: User! + title: String! + content: String! 
+} +``` + +Depending on a request's attached scopes, the router executes the following query differently. +If the request includes only the `read:others` scope, then the router executes the following filtered query: + + + +```graphql title="Raw query to router" +query { + users { + username + profileImage + email + } +} +``` + +```graphql title="Scopes: 'read:others'" +query { + users { + username + profileImage + } +} +``` + + + +The response would include an error at the `/users/@/email` path since that field requires the `read:emails` scope. +The router can execute the entire query successfully if the request includes the `read:others read:emails` scope set. + +### `@authenticated` + +The `@authenticated` directive marks specific fields and types as requiring authentication. +It works by checking for the `apollo_authentication::JWT::claims` key in a request's context, that is added either by the JWT authentication plugin, when the request contains a valid JWT, or by an authentication coprocessor. +If the key exists, it means the request is authenticated, and the router executes the query in its entirety. +If the request is unauthenticated, the router removes `@authenticated` fields before planning the query and only executes the parts of the query that don't require authentication. + +#### Usage + +To use the `@authenticated` directive in a subgraph, you can [import it from the `@link` directive](/federation/federated-types/federated-directives/#importing-directives) like so: + +```graphql +extend schema + @link( + url: "https://specs.apollo.dev/federation/v2.5", + import: [..., "@authenticated"]) +``` + +#### Example `@authenticated` use case + +Diving deeper into the [social media example](#example-requiresscopes-use-case): let's say unauthenticated users can view a post's title, author, and content. +However, you only want authenticated users to see the number of views a post has received. +You also need to be able to query for an authenticated user's information. + +The relevant part of your schema may look something like this: + +```graphql +type Query { + me: User @authenticated + post(id: ID!): Post +} + +type User { + id: ID! + username: String + email: String @requiresScopes(scopes: ["read:email"]) + posts: [Post!]! +} + +type Post { + id: ID! + author: User! + title: String! + content: String! + views: Int @authenticated +} + +``` + +Consider the following query: + +```graphql title="Sample query" +query { + me { + username + } + post(id: "1234") { + title + views + } +} +``` + +The router would execute the entire query for an authenticated request. +For an unauthenticated request, the router would remove the `@authenticated` fields and execute the filtered query. + + + +```graphql title="Query executed for an authenticated request" +query { + me { + username + } + post(id: "1234") { + title + views + } +} +``` + +```graphql title="Query executed for an unauthenticated request" +query { + post(id: "1234") { + title + } +} +``` + + + +For an unauthenticated request, the router doesn't attempt to resolve the top-level `me` query, nor the views for the post with `id: "1234"`. +The response retains the initial request's shape but returns `null` for unauthorized fields and applies the [standard GraphQL null propagation rules](https://www.apollographql.com/blog/graphql/basics/using-nullability-in-graphql/#what-happens-if-you-try-to-return-null-for-a-non-null-field). 
+ +```json title="Unauthenticated request response" +{ + "data": { + "me": null, + "post": { + "title": "Securing supergraphs", + } + }, + "errors": [ + { + "message": "Unauthorized field or type", + "path": [ + "me" + ], + "extensions": { + "code": "UNAUTHORIZED_FIELD_OR_TYPE" + } + }, + { + "message": "Unauthorized field or type", + "path": [ + "post", + "views" + ], + "extensions": { + "code": "UNAUTHORIZED_FIELD_OR_TYPE" + } + } + ] +} +``` + +If _every_ requested field requires authentication and a request is unauthenticated, the router generates an error indicating that the query is unauthorized. + +## Composition and federation + +GraphOS's composition strategy for authorization directives is intentionally accumulative. When you define authorization directives on fields and types in subgraphs, GraphOS composes them into the supergraph schema. In other words, if subgraph fields or types include `@requiresScopes` or `@authenticated` directives, they are set on the supergraph too. + +#### Composition with `AND`/`OR` logic + +If shared subgraph fields include multiple directives, composition merges them. For example, suppose the `me` query requires `@authentication` in one subgraph: + + +```graphql title="Subgraph A" +type Query { + me: User @authenticated +} + +type User { + id: ID! + username: String + email: String +} +``` + +and the `read:user` scope in another subgraph: + +```graphql title="Subgraph B" +type Query { + me: User @requiresScopes(scopes: [["read:user"]]) +} + +type User { + id: ID! + username: String + email: String +} +``` + +A request would need to both be authenticated **AND** have the required scope. Recall that the `@authenticated` directive only checks for the existence of the `apollo_authentication::JWT::claims` key in a request's context, so authentication is guaranteed if the request includes scopes. + +If multiple shared subgraph fields include `@requiresScopes`, the supergraph schema merges them with the same logic used to [combine scopes for a single use of `@requiresScopes`](#combining-required-scopes-with-andor-logic). For example, if one subgraph requires the `read:others` scope on the `users` query: + +```graphql title="Subgraph A" +type Query { + users: [User!]! @requiresScopes(scopes: [["read:others"]]) +} +``` + +and another subgraph requires the `read:profiles` scope on `users` query: + +```graphql title="Subgraph B" +type Query { + users: [User!]! @requiresScopes(scopes: [["read:profiles"]]) +} +``` + +Then the supergraph schema would require _both_ scopes for it. + +```graphql title="Supergraph" +type Query { + users: [User!]! @requiresScopes(scopes: [["read:others", "read:profiles"]]) +} +``` + +As with [combining scopes for a single use of `@requiresScopes`](#combining-required-scopes-with-andor-logic), you can use nested arrays to introduce **OR** logic: + +```graphql title="Subgraph A" +type Query { + users: [User!]! @requiresScopes(scopes: [["read:others", "read:users"]]) +} +``` + +```graphql title="Subgraph B" +type Query { + users: [User!]! @requiresScopes(scopes: [["read:profiles"]]) +} +``` + +Since both `scopes` arrays are nested arrays, they would be composed using **OR** logic into the supergraph schema: + +```graphql title="Supergraph" +type Query { + users: [User!]! @requiresScopes(scopes: [["read:others", "read:users"], ["read:profiles"]]) +} +``` + +This syntax means a request needs either (`read:others` **AND** `read:users`) scopes **OR** just the `read:profiles` scope to be authorized. 
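To make the **OR**-of-**AND** evaluation concrete, the following minimal sketch (not the router's implementation; the `satisfies` function and the example scope sets are invented for illustration) checks a granted scope set against a composed `scopes: [[...], [...]]` requirement:

```rust
use std::collections::HashSet;

/// The outer array is OR logic and each inner array is AND logic:
/// the requirement is met if at least one inner group is fully granted.
fn satisfies(required: &[Vec<&str>], granted: &HashSet<&str>) -> bool {
    required
        .iter()
        .any(|group| group.iter().all(|scope| granted.contains(scope)))
}

fn main() {
    // Composed requirement: (read:others AND read:users) OR read:profiles.
    let required = vec![vec!["read:others", "read:users"], vec!["read:profiles"]];

    let granted: HashSet<&str> = ["read:profiles"].into_iter().collect();
    assert!(satisfies(&required, &granted));

    let granted: HashSet<&str> = ["read:others"].into_iter().collect();
    assert!(!satisfies(&required, &granted)); // read:users is missing
}
```

A request whose scopes satisfy none of the inner groups has the corresponding fields filtered out of the query, as described above.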
+ +### Authorization and `@key` fields + +The [`@key` directive](https://www.apollographql.com/docs/federation/entities/) lets you create an entity whose fields resolve across multiple subgraphs. +If you use authorization directives on fields defined in [`@key` directives](https://www.apollographql.com/docs/federation/entities/), Apollo still uses those fields to compose entities between the subgraphs, but the client cannot query them directly. + +Consider these example subgraph schemas: + +```graphql title="Product subgraph" +type Query { + product: Product +} + +type Product @key(fields: "id") { + id: ID! @authenticated + name: String! + price: Int @authenticated +} +``` + +```graphql title="Inventory subgraph" +type Query { + product: Product +} + +type Product @key(fields: "id") { + id: ID! @authenticated + inStock: Boolean! +} +``` + +An unauthenticated request would successfully execute this query: + +```graphql +query { + product { + name + inStock + } +} +``` + +Specifically, under the hood, the router would use the `id` field to resolve the `Product` entity, but it wouldn't return it. + +For the following query, an unauthenticated request would resolve `null` for `id`. And since `id` is a non-nullable field, `product` would return `null`. + +```graphql +query { + product { + id + username + } +} +``` + +This behavior resembles what you can create with [contracts](/graphos/delivery/contracts/) and the [`@inaccessible` directive](https://www.apollographql.com/docs/federation/federated-types/federated-directives/#inaccessible). + +### Authorization and interfaces + +If a type [implementing an interface](https://www.apollographql.com/docs/apollo-server/schema/unions-interfaces/#interface-type) requires authorization, unauthorized requests can query the interface, but not any parts of the type that require authorization. + +For example, consider this schema where the `Post` interface doesn't require authentication, but the `PrivateBlog` type, which implements `Post`, does: + +```graphql +type Query { + posts: [Post!]! +} + +type User { + id: ID! + username: String + posts: [Post!]! +} + +interface Post { + id: ID! + author: User! + title: String! + content: String! +} + +type PrivateBlog implements Post @authenticated { + id: ID! + author: User! + title: String! + content: String! + publishAt: String + allowedViewers: [User!]! +} +``` + +If an unauthenticated request were to make this query: + +```graphql +query { + posts { + id + author + title + ... on PrivateBlog { + allowedViewers + } + } +} +``` + +The router would filter the query as follows: + +```graphql +query { + posts { + id + author + title + } +} +``` + +The response would include an `"UNAUTHORIZED_FIELD_OR_TYPE"` error at the `/posts/@/allowedViewers` path. + +## Query deduplication + +You can enable [query deduplication](../configuration/traffic-shaping/#query-deduplication) in the router to reduce redundant requests to a subgraph. The router does this by buffering similar queries and reusing the result. + +**Query deduplication takes authorization into account.** First, the router groups unauthenticated queries together. Then it groups authenticated queries by their required scope set. It uses these groups to execute queries efficiently when fulfilling requests. + +## Introspection + +Introspection is turned off in the router by default, [as is best production practice](https://www.apollographql.com/blog/graphql/security/why-you-should-disable-graphql-introspection-in-production/). 
If you've chosen to [enable it](./overview/#introspection), keep in mind that **authorization directives don't affect introspection**. All fields that require authorization remain visible. However, directives applied to fields _aren't_ visible. If introspection might reveal too much information about internal types, then be sure it hasn't been enabled in your router configuration. + +With introspection turned off, you can use GraphOS's [schema registry](/graphos/delivery/) to explore your supergraph schema and empower your teammates to do the same. If you want to completely remove fields from a graph rather than just preventing access (even with introspection on), consider building a [contract graph](/graphos/delivery/contracts/). diff --git a/docs/source/enterprise-features.mdx b/docs/source/enterprise-features.mdx index 5594b52c39..570aaee444 100644 --- a/docs/source/enterprise-features.mdx +++ b/docs/source/enterprise-features.mdx @@ -11,6 +11,7 @@ The Apollo Router provides expanded performance, security, and customization fea - **Real-time updates** via [GraphQL subscriptions](./executing-operations/subscription-support/) - **Authentication of inbound requests** via [JSON Web Token (JWT)](./configuration/authn-jwt/) +- **Access control** of specific fields and types through the [`@requiresScopes`](./configuration/authorization#requiresscopes) and [`@authenticated`](./configuration/authorization#authenticated) directives - Redis-backed [**distributed caching** of query plans and persisted queries](./configuration/distributed-caching/) - **Custom request handling** in any language via [external coprocessing](./customizations/coprocessor/) - **Mitigation of potentially malicious requests** via [operation limits](./configuration/operation-limits) and [safelisting with persisted queries](./configuration/persisted-queries) From bc188ab2c722702a735e96aa97c6e2113c1db7e1 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Fri, 1 Sep 2023 18:32:45 +0300 Subject: [PATCH 20/81] Revert "Fix metrics attribute types" (#3722) Reverts apollographql/router#3701 --- .changesets/fix_bryn_fix_metrics_typing.md | 6 - apollo-router/src/configuration/metrics.rs | 54 +--- ...etrics__test__metrics@apq.router.yaml.snap | 6 +- ...st__metrics@authorization.router.yaml.snap | 4 +- ...@authorization_directives.router.yaml.snap | 4 +- ...test__metrics@coprocessor.router.yaml.snap | 12 +- ...s__test__metrics@entities.router.yaml.snap | 18 +- ...ics__test__metrics@limits.router.yaml.snap | 16 +- ...metrics@persisted_queries.router.yaml.snap | 6 +- ...st__metrics@subscriptions.router.yaml.snap | 10 +- ...__test__metrics@telemetry.router.yaml.snap | 12 +- ...__metrics@traffic_shaping.router.yaml.snap | 16 +- .../src/plugins/telemetry/metrics/layer.rs | 241 +++--------------- apollo-router/src/plugins/telemetry/mod.rs | 2 +- .../src/query_planner/bridge_query_planner.rs | 12 +- 15 files changed, 100 insertions(+), 319 deletions(-) delete mode 100644 .changesets/fix_bryn_fix_metrics_typing.md diff --git a/.changesets/fix_bryn_fix_metrics_typing.md b/.changesets/fix_bryn_fix_metrics_typing.md deleted file mode 100644 index b4f26d67ed..0000000000 --- a/.changesets/fix_bryn_fix_metrics_typing.md +++ /dev/null @@ -1,6 +0,0 @@ -### Fix metrics attribute types ([Issue #3687](https://github.com/apollographql/router/issues/3687)) - -Metrics attributes were being coerced to strings. This is now fixed. -In addition, the logic around types accepted as metrics attributes has been simplified. 
It will log and ignore values of the wrong type. - -By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/3701 diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs index eb459b92e9..0e8d5e74fb 100644 --- a/apollo-router/src/configuration/metrics.rs +++ b/apollo-router/src/configuration/metrics.rs @@ -5,7 +5,6 @@ use std::time::Duration; use jsonpath_rust::JsonPathInst; use paste::paste; -use serde::Serialize; use serde_json::Value; use tokio::sync::OwnedSemaphorePermit; @@ -17,39 +16,7 @@ pub(crate) struct MetricsHandle { pub(crate) struct Metrics { yaml: Value, - metrics: HashMap)>, -} - -enum AttributeValue { - Bool(bool), - U64(u64), - I64(i64), - F64(f64), - String(String), -} - -impl Serialize for AttributeValue { - fn serialize(&self, serializer: S) -> Result { - match self { - AttributeValue::Bool(value) => serializer.serialize_bool(*value), - AttributeValue::U64(value) => serializer.serialize_u64(*value), - AttributeValue::I64(value) => serializer.serialize_i64(*value), - AttributeValue::F64(value) => serializer.serialize_f64(*value), - AttributeValue::String(value) => serializer.serialize_str(value), - } - } -} - -impl AttributeValue { - fn dyn_value(self: &AttributeValue) -> &dyn tracing::Value { - match self { - AttributeValue::Bool(value) => value as &dyn tracing::Value, - AttributeValue::U64(value) => value as &dyn tracing::Value, - AttributeValue::I64(value) => value as &dyn tracing::Value, - AttributeValue::F64(value) => value as &dyn tracing::Value, - AttributeValue::String(value) => value as &dyn tracing::Value, - } - } + metrics: HashMap)>, } impl Metrics { @@ -131,19 +98,12 @@ impl Metrics { let attr_name = stringify!([<$($attr __ )+>]).to_string(); match JsonPathInst::from_str($attr_path).expect("json path must be valid").find_slice(value).into_iter().next().as_deref() { // If the value is an object we can only state that it is set, but not what it is set to. - Some(Value::Object(_value)) => {attributes.insert(attr_name, AttributeValue::Bool(true));}, - Some(Value::Array(value)) if !value.is_empty() => {attributes.insert(attr_name, AttributeValue::Bool(true));}, + Some(Value::Object(_value)) => {attributes.insert(attr_name, "true".to_string());}, + Some(Value::Array(value)) if !value.is_empty() => {attributes.insert(attr_name, "true".to_string());}, // Scalars can be logged as is. - Some(Value::Number(value)) if value.is_f64() => {attributes.insert(attr_name, AttributeValue::F64(value.as_f64().expect("checked, qed")));}, - Some(Value::Number(value)) if value.is_i64() => {attributes.insert(attr_name, AttributeValue::I64(value.as_i64().expect("checked, qed")));}, - Some(Value::Number(value)) => {attributes.insert(attr_name, AttributeValue::U64(value.as_u64().expect("checked, qed")));}, - Some(Value::String(value)) => {attributes.insert(attr_name, AttributeValue::String(value.clone()));}, - Some(Value::Bool(value)) => {attributes.insert(attr_name, AttributeValue::Bool(*value));}, - + Some(value) => {attributes.insert(attr_name, value.to_string());}, // If the value is not set we don't specify the attribute. 
- None => {attributes.insert(attr_name, AttributeValue::Bool(false));}, - - _ => {}, + None => {attributes.insert(attr_name, "false".to_string());}, };)+ (1, attributes) } @@ -153,7 +113,7 @@ impl Metrics { let mut attributes = HashMap::new(); $( let attr_name = stringify!([<$($attr __ )+>]).to_string(); - attributes.insert(attr_name, AttributeValue::Bool(false)); + attributes.insert(attr_name, "false".to_string()); )+ (0, attributes) } @@ -162,7 +122,7 @@ impl Metrics { // Now log the metric paste!{ - tracing::info!($($metric).+ = metric.0, $($($attr).+ = metric.1.get(stringify!([<$($attr __ )+>])).expect("attribute must be in map").dyn_value()),+); + tracing::info!($($metric).+ = metric.0, $($($attr).+ = metric.1.get(stringify!([<$($attr __ )+>])).expect("attribute must be in map")),+); } }; } diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap index 9108dfc7a1..bf5efaf603 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap @@ -4,7 +4,7 @@ expression: "&metrics.metrics" --- value.apollo.router.config.apq: - 1 - - opt__router__cache__in_memory__: true - opt__router__cache__redis__: true - opt__subgraph__: true + - opt__router__cache__in_memory__: "true" + opt__router__cache__redis__: "true" + opt__subgraph__: "true" diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap index e45a4962f7..11f9160614 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap @@ -4,6 +4,6 @@ expression: "&metrics.metrics" --- value.apollo.router.config.authorization: - 1 - - opt__directives__: false - opt__require_authentication__: true + - opt__directives__: "false" + opt__require_authentication__: "true" diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap index 38462ec606..61b5d4c144 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap @@ -4,6 +4,6 @@ expression: "&metrics.metrics" --- value.apollo.router.config.authorization: - 1 - - opt__directives__: true - opt__require_authentication__: false + - opt__directives__: "true" + opt__require_authentication__: "false" diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap index 
bdc1a7899b..b5eb1df764 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap @@ -4,10 +4,10 @@ expression: "&metrics.metrics" --- value.apollo.router.config.coprocessor: - 1 - - opt__router__request__: true - opt__router__response__: true - opt__subgraph__request__: true - opt__subgraph__response__: true - opt__supergraph__request__: false - opt__supergraph__response__: false + - opt__router__request__: "true" + opt__router__response__: "true" + opt__subgraph__request__: "true" + opt__subgraph__response__: "true" + opt__supergraph__request__: "false" + opt__supergraph__response__: "false" diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap index e4fe10d957..1bce92d5c8 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap @@ -4,15 +4,15 @@ expression: "&metrics.metrics" --- value.apollo.router.config.entities: - 1 - - opt__cache__: true + - opt__cache__: "true" value.apollo.router.config.traffic_shaping: - 1 - - opt__router__rate_limit__: false - opt__router__timout__: false - opt__subgraph__compression__: false - opt__subgraph__deduplicate_query__: false - opt__subgraph__http2__: false - opt__subgraph__rate_limit__: false - opt__subgraph__retry__: false - opt__subgraph__timeout__: false + - opt__router__rate_limit__: "false" + opt__router__timout__: "false" + opt__subgraph__compression__: "false" + opt__subgraph__deduplicate_query__: "false" + opt__subgraph__http2__: "false" + opt__subgraph__rate_limit__: "false" + opt__subgraph__retry__: "false" + opt__subgraph__timeout__: "false" diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap index 055f60152d..53807bab66 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap @@ -4,12 +4,12 @@ expression: "&metrics.metrics" --- value.apollo.router.config.limits: - 1 - - opt__operation__max_aliases__: true - opt__operation__max_depth__: true - opt__operation__max_height__: true - opt__operation__max_root_fields__: true - opt__operation__warn_only__: true - opt__parser__max_recursion__: true - opt__parser__max_tokens__: true - opt__request__max_size__: true + - opt__operation__max_aliases__: "true" + opt__operation__max_depth__: "true" + opt__operation__max_height__: "true" + opt__operation__max_root_fields__: "true" + opt__operation__warn_only__: "true" + opt__parser__max_recursion__: "true" + opt__parser__max_tokens__: "true" + opt__request__max_size__: "true" diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap 
b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap index 72b803ca49..507f9c756f 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap @@ -4,7 +4,7 @@ expression: "&metrics.metrics" --- value.apollo.router.config.persisted_queries: - 1 - - opt__log_unknown__: true - opt__safelist__enabled__: true - opt__safelist__require_id__: true + - opt__log_unknown__: "true" + opt__safelist__enabled__: "true" + opt__safelist__require_id__: "true" diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap index a019d34928..3709a1603d 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap @@ -4,9 +4,9 @@ expression: "&metrics.metrics" --- value.apollo.router.config.subscriptions: - 1 - - opt__deduplication__: false - opt__max_opened__: true - opt__mode__callback__: true - opt__mode__passthrough__: true - opt__queue_capacity__: true + - opt__deduplication__: "false" + opt__max_opened__: "true" + opt__mode__callback__: "true" + opt__mode__passthrough__: "true" + opt__queue_capacity__: "true" diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap index 8ea0c00cab..7e02cf7f31 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap @@ -4,10 +4,10 @@ expression: "&metrics.metrics" --- value.apollo.router.config.telemetry: - 1 - - opt__metrics__otlp__: true - opt__metrics__prometheus__: true - opt__tracing__datadog__: true - opt__tracing__jaeger__: true - opt__tracing__otlp__: true - opt__tracing__zipkin__: true + - opt__metrics__otlp__: "true" + opt__metrics__prometheus__: "true" + opt__tracing__datadog__: "true" + opt__tracing__jaeger__: "true" + opt__tracing__otlp__: "true" + opt__tracing__zipkin__: "true" diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap index ab53cd0460..1cdb685e7d 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap @@ -4,12 +4,12 @@ expression: "&metrics.metrics" --- value.apollo.router.config.traffic_shaping: - 1 - - opt__router__rate_limit__: true - opt__router__timout__: true - opt__subgraph__compression__: true - 
opt__subgraph__deduplicate_query__: true - opt__subgraph__http2__: true - opt__subgraph__rate_limit__: true - opt__subgraph__retry__: true - opt__subgraph__timeout__: true + - opt__router__rate_limit__: "true" + opt__router__timout__: "true" + opt__subgraph__compression__: "true" + opt__subgraph__deduplicate_query__: "true" + opt__subgraph__http2__: "true" + opt__subgraph__rate_limit__: "true" + opt__subgraph__retry__: "true" + opt__subgraph__timeout__: "true" diff --git a/apollo-router/src/plugins/telemetry/metrics/layer.rs b/apollo-router/src/plugins/telemetry/metrics/layer.rs index f19c1cd691..c195891d7b 100644 --- a/apollo-router/src/plugins/telemetry/metrics/layer.rs +++ b/apollo-router/src/plugins/telemetry/metrics/layer.rs @@ -24,6 +24,8 @@ use super::METRIC_PREFIX_HISTOGRAM; use super::METRIC_PREFIX_MONOTONIC_COUNTER; use super::METRIC_PREFIX_VALUE; +const I64_MAX: u64 = i64::MAX as u64; + #[derive(Default)] pub(crate) struct Instruments { u64_counter: MetricsMap>, @@ -157,243 +159,69 @@ pub(crate) struct MetricVisitor<'a> { pub(crate) metric: Option<(&'static str, InstrumentType)>, pub(crate) custom_attributes: Vec, pub(crate) meter: &'a Meter, - attributes_ignored: bool, -} - -impl<'a> MetricVisitor<'a> { - fn set_metric(&mut self, name: &'static str, instrument_type: InstrumentType) { - self.metric = Some((name, instrument_type)); - if self.attributes_ignored { - tracing::error!( - metric_name = name, - "metric attributes must be declared after the metric value. Some attributes have been ignored" - ); - } - } } impl<'a> Visit for MetricVisitor<'a> { - fn record_f64(&mut self, field: &Field, value: f64) { - if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - self.set_metric(metric_name, InstrumentType::CounterF64(value)); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - self.set_metric(metric_name, InstrumentType::UpDownCounterF64(value)); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - self.set_metric(metric_name, InstrumentType::HistogramF64(value)); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { - tracing::error!( - metric_name, - "gauge must be u64. This metric will be ignored" - ); - } else if self.metric.is_some() { + fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { + // Do not display the log content + if field.name() != "message" { self.custom_attributes.push(KeyValue::new( Key::from_static_str(field.name()), - Value::from(value), + Value::from(format!("{value:?}")), )); - } else { - self.attributes_ignored = true } } - fn record_i64(&mut self, field: &Field, value: i64) { - if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - tracing::error!( - metric_name, - "monotonic counter must be u64 or f64. This metric will be ignored" - ); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - self.set_metric(metric_name, InstrumentType::UpDownCounterI64(value)); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - self.set_metric(metric_name, InstrumentType::HistogramI64(value)); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { - tracing::error!( - metric_name, - "gauge must be u64. 
This metric will be ignored" - ); - } else if self.metric.is_some() { - self.custom_attributes.push(KeyValue::new( - Key::from_static_str(field.name()), - Value::from(value), - )); - } else { - self.attributes_ignored = true - } + fn record_str(&mut self, field: &Field, value: &str) { + self.custom_attributes.push(KeyValue::new( + Key::from_static_str(field.name()), + Value::from(value.to_string()), + )); } fn record_u64(&mut self, field: &Field, value: u64) { if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - self.set_metric(metric_name, InstrumentType::CounterU64(value)); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - tracing::error!( - metric_name, - "counter must be i64. This metric will be ignored" - ); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - self.set_metric(metric_name, InstrumentType::HistogramU64(value)); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { - self.set_metric(metric_name, InstrumentType::GaugeU64(value)); - } else if self.metric.is_some() { - tracing::error!( - name = field.name(), - "metric attribute must be i64, f64, string or bool. This attribute will be ignored" - ); - } else { - self.attributes_ignored = true - } - } - - fn record_i128(&mut self, field: &Field, _value: i128) { - if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - tracing::error!( - metric_name, - "monotonic counter must be u64 or f64. This metric will be ignored" - ); + self.metric = Some((metric_name, InstrumentType::CounterU64(value))); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - tracing::error!( - metric_name, - "counter must be i64. This metric will be ignored" - ); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - tracing::error!( - metric_name, - "histogram must be u64, i64 or f64. This metric will be ignored" - ); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { - tracing::error!( - metric_name, - "gauge must be u64. This metric will be ignored" - ); - } else if self.metric.is_some() { - tracing::error!( - name = field.name(), - "metric attribute must be i64, f64, string or bool. This attribute will be ignored" - ); - } else { - self.attributes_ignored = true - } - } - - fn record_u128(&mut self, field: &Field, _value: u128) { - if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - tracing::error!( - metric_name, - "monotonic counter must be u64 or f64. This metric will be ignored" - ); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - tracing::error!( - metric_name, - "counter must be i64. This metric will be ignored" - ); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - tracing::error!( - metric_name, - "histogram must be u64, i64 or f64. This metric will be ignored" - ); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { - tracing::error!( - metric_name, - "gauge must be u64. This metric will be ignored" - ); - } else if self.metric.is_some() { - tracing::error!( - name = field.name(), - "metric attribute must be i64, f64, string or bool. 
This attribute will be ignored" - ); - } else { - self.attributes_ignored = true - } - } - - fn record_bool(&mut self, field: &Field, value: bool) { - if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - tracing::error!( - metric_name, - "monotonic counter must be u64 or f64. This metric will be ignored" - ); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - tracing::error!( - metric_name, - "counter must be i64. This metric will be ignored" - ); + if value <= I64_MAX { + self.metric = Some((metric_name, InstrumentType::UpDownCounterI64(value as i64))); + } else { + eprintln!( + "[tracing-opentelemetry]: Received Counter metric, but \ + provided u64: {value} is greater than i64::MAX. Ignoring \ + this metric." + ); + } } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - tracing::error!( - metric_name, - "histogram must be u64, i64 or f64. This metric will be ignored" - ); + self.metric = Some((metric_name, InstrumentType::HistogramU64(value))); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { - tracing::error!( - metric_name, - "gauge must be u64. This metric will be ignored" - ); - } else if self.metric.is_some() { - self.custom_attributes.push(KeyValue::new( - Key::from_static_str(field.name()), - Value::from(value), - )); + self.metric = Some((metric_name, InstrumentType::GaugeU64(value))); } else { - self.attributes_ignored = true + self.record_debug(field, &value); } } - fn record_str(&mut self, field: &Field, value: &str) { + fn record_f64(&mut self, field: &Field, value: f64) { if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - tracing::error!( - metric_name, - "monotonic counter must be u64 or f64. This metric will be ignored" - ); + self.metric = Some((metric_name, InstrumentType::CounterF64(value))); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - tracing::error!( - metric_name, - "counter must be i64. This metric will be ignored" - ); + self.metric = Some((metric_name, InstrumentType::UpDownCounterF64(value))); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - tracing::error!( - metric_name, - "histogram must be u64, i64 or f64. This metric will be ignored" - ); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { - tracing::error!( - metric_name, - "gauge must be u64. This metric will be ignored" - ); - } else if self.metric.is_some() { - self.custom_attributes.push(KeyValue::new( - Key::from_static_str(field.name()), - Value::from(value.to_string()), - )); + self.metric = Some((metric_name, InstrumentType::HistogramF64(value))); } else { - self.attributes_ignored = true + self.record_debug(field, &value); } } - fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { + fn record_i64(&mut self, field: &Field, value: i64) { if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - tracing::error!( - metric_name, - "monotonic counter must be u64 or f64. This metric will be ignored" - ); + self.metric = Some((metric_name, InstrumentType::CounterU64(value as u64))); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - tracing::error!( - metric_name, - "counter must be i64. 
This metric will be ignored" - ); + self.metric = Some((metric_name, InstrumentType::UpDownCounterI64(value))); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - tracing::error!( - metric_name, - "histogram must be u64, i64 or f64. This metric will be ignored" - ); - } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { - tracing::error!( - metric_name, - "gauge must be u64. This metric will be ignored" - ); - } else if self.metric.is_some() { - self.custom_attributes.push(KeyValue::new( - Key::from_static_str(field.name()), - Value::from(format!("{value:?}")), - )); + self.metric = Some((metric_name, InstrumentType::HistogramI64(value))); } else { - self.attributes_ignored = true + self.record_debug(field, &value); } } } @@ -437,7 +265,6 @@ where meter: &self.meter, metric: None, custom_attributes: Vec::new(), - attributes_ignored: false, }; event.record(&mut metric_visitor); metric_visitor.finish(); diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index 14cfbd1d5c..b61d5fbf21 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -840,7 +840,7 @@ impl Telemetry { } ::tracing::info!( monotonic_counter.apollo.router.operations = 1u64, - http.response.status_code = parts.status.as_u16() as i64, + http.response.status_code = parts.status.as_u16(), ); let response = http::Response::from_parts( parts, diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index 7805d127f9..e4929420de 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -91,7 +91,7 @@ impl BridgeQueryPlanner { if has_validation_errors && !schema.has_errors() { tracing::warn!( - monotonic_counter.apollo.router.validation = 1u64, + monotonic_counter.apollo.router.validation = 1, validation.source = VALIDATION_SOURCE_SCHEMA, validation.result = VALIDATION_FALSE_NEGATIVE, "validation mismatch: JS query planner reported a schema validation error, but apollo-rs did not" @@ -106,7 +106,7 @@ impl BridgeQueryPlanner { if configuration.experimental_graphql_validation_mode == GraphQLValidationMode::Both { if schema.has_errors() { tracing::warn!( - monotonic_counter.apollo.router.validation = 1u64, + monotonic_counter.apollo.router.validation = 1, validation.source = VALIDATION_SOURCE_SCHEMA, validation.result = VALIDATION_FALSE_POSITIVE, "validation mismatch: apollo-rs reported a schema validation error, but JS query planner did not" @@ -114,7 +114,7 @@ impl BridgeQueryPlanner { } else { // false_negative was an early return so we know it was correct here tracing::info!( - monotonic_counter.apollo.router.validation = 1u64, + monotonic_counter.apollo.router.validation = 1, validation.source = VALIDATION_SOURCE_SCHEMA, validation.result = VALIDATION_MATCH ); @@ -286,7 +286,7 @@ impl BridgeQueryPlanner { match (is_validation_error, &selections.validation_error) { (false, Some(_)) => { tracing::warn!( - monotonic_counter.apollo.router.validation = 1u64, + monotonic_counter.apollo.router.validation = 1, validation.source = VALIDATION_SOURCE_OPERATION, validation.result = VALIDATION_FALSE_POSITIVE, "validation mismatch: JS query planner did not report query validation error, but apollo-rs did" @@ -294,7 +294,7 @@ impl BridgeQueryPlanner { } (true, None) => { tracing::warn!( - monotonic_counter.apollo.router.validation = 
1u64, + monotonic_counter.apollo.router.validation = 1, validation.source = VALIDATION_SOURCE_OPERATION, validation.result = VALIDATION_FALSE_NEGATIVE, "validation mismatch: apollo-rs did not report query validation error, but JS query planner did" @@ -302,7 +302,7 @@ impl BridgeQueryPlanner { } // if JS and Rust implementations agree, we return the JS result for now. _ => tracing::info!( - monotonic_counter.apollo.router.validation = 1u64, + monotonic_counter.apollo.router.validation = 1, validation.source = VALIDATION_SOURCE_OPERATION, validation.result = VALIDATION_MATCH, ), From b60304bf20db15a3dae510d7b9ab9c7b6a930a8b Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Fri, 1 Sep 2023 17:13:42 +0200 Subject: [PATCH 21/81] Fix monotonic counter type This fixes a bug where you would encounter error logs: monotonic counter must be u64 or f64. This metric will be ignored. --- apollo-router/src/plugins/authentication/mod.rs | 2 +- apollo-router/src/query_planner/execution.rs | 2 +- apollo-router/src/query_planner/fetch.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apollo-router/src/plugins/authentication/mod.rs b/apollo-router/src/plugins/authentication/mod.rs index 3fab8ed230..b90c814b7a 100644 --- a/apollo-router/src/plugins/authentication/mod.rs +++ b/apollo-router/src/plugins/authentication/mod.rs @@ -627,7 +627,7 @@ fn authenticate( monotonic_counter.apollo_authentication_success_count = 1u64, kind = %AUTHENTICATION_KIND ); - tracing::info!(monotonic_counter.apollo.router.operations.jwt = 1); + tracing::info!(monotonic_counter.apollo.router.operations.jwt = 1u64); return Ok(ControlFlow::Continue(request)); } diff --git a/apollo-router/src/query_planner/execution.rs b/apollo-router/src/query_planner/execution.rs index a97f0fc06c..0cb52f333f 100644 --- a/apollo-router/src/query_planner/execution.rs +++ b/apollo-router/src/query_planner/execution.rs @@ -76,7 +76,7 @@ impl QueryPlan { ) .await; if !deferred_fetches.is_empty() { - tracing::info!(monotonic_counter.apollo.router.operations.defer = 1); + tracing::info!(monotonic_counter.apollo.router.operations.defer = 1u64); } Response::builder().data(value).errors(errors).build() diff --git a/apollo-router/src/query_planner/fetch.rs b/apollo-router/src/query_planner/fetch.rs index 83509ee3be..41e2b233e3 100644 --- a/apollo-router/src/query_planner/fetch.rs +++ b/apollo-router/src/query_planner/fetch.rs @@ -290,7 +290,7 @@ impl FetchNode { self.response_at_path(parameters.schema, current_dir, paths, response); if let Some(id) = &self.id { if let Some(sender) = parameters.deferred_fetches.get(id.as_str()) { - tracing::info!(monotonic_counter.apollo.router.operations.defer.fetch = 1); + tracing::info!(monotonic_counter.apollo.router.operations.defer.fetch = 1u64); if let Err(e) = sender.clone().send((value.clone(), errors.clone())) { tracing::error!("error sending fetch result at path {} and id {:?} for deferred response building: {}", current_dir, self.id, e); } From 7a58ea47c2c6b893b86b7207a28af883195c27a4 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 2 Sep 2023 16:21:21 +0000 Subject: [PATCH 22/81] fix(deps): update rust crate regex to 1.9.5 --- Cargo.lock | 10 +++++----- apollo-router/Cargo.toml | 2 +- xtask/Cargo.lock | 12 ++++++------ xtask/Cargo.toml | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1c570d7ec0..f1793dfbfc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4975,13 +4975,13 @@ dependencies = [ 
[[package]] name = "regex" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" +checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.3.7", + "regex-automata 0.3.8", "regex-syntax 0.7.5", ] @@ -4996,9 +4996,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" +checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" dependencies = [ "aho-corasick", "memchr", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 8b25903d3a..5ab613a39a 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -163,7 +163,7 @@ prost-types = "0.11.9" proteus = "0.5.0" rand = "0.8.5" rhai = { version = "1.15.1", features = ["sync", "serde", "internals"] } -regex = "1.9.4" +regex = "1.9.5" reqwest = { version = "0.11.19", default-features = false, features = [ "rustls-tls", "rustls-native-certs", diff --git a/xtask/Cargo.lock b/xtask/Cargo.lock index d11f5724b0..bdf077abae 100644 --- a/xtask/Cargo.lock +++ b/xtask/Cargo.lock @@ -834,9 +834,9 @@ checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "memchr" -version = "2.5.0" +version = "2.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "5486aed0026218e61b8a01d5fbd5a0a134649abb71a0e53b7bc088529dced86e" [[package]] name = "memorable-wordlist" @@ -1060,9 +1060,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" +checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" dependencies = [ "aho-corasick", "memchr", @@ -1072,9 +1072,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" +checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" dependencies = [ "aho-corasick", "memchr", diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index 840f2a9283..b19d08afac 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -26,7 +26,7 @@ libc = "0.2" memorable-wordlist = "0.1.7" nu-ansi-term = "0.49" once_cell = "1" -regex = "1.9.4" +regex = "1.9.5" reqwest = { version = "0.11", default-features = false, features = [ "blocking", "rustls-tls", From 22bf32ea5b14206905c9fb191327b40b22f40da8 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 2 Sep 2023 23:20:36 +0000 Subject: [PATCH 23/81] chore(deps): update all non-major packages >= 1.0 --- Cargo.lock | 12 ++++++------ apollo-router/Cargo.toml | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f1793dfbfc..fc9568d242 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3716,9 +3716,9 @@ checksum = "8c408dc227d302f1496c84d9dc68c00fec6f56f9228a18f3023f976f3ca7c945" [[package]] name = "memchr" -version = "2.6.2" +version = "2.6.3" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5486aed0026218e61b8a01d5fbd5a0a134649abb71a0e53b7bc088529dced86e" +checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" [[package]] name = "memoffset" @@ -6156,18 +6156,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.47" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97a802ec30afc17eee47b2855fc72e0c4cd62be9b4efe6591edde0ec5bd68d8f" +checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.47" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb623b56e39ab7dcd4b1b98bb6c8f8d907ed255b18de254088016b27a8ee19b" +checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" dependencies = [ "proc-macro2", "quote", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 5ab613a39a..1334b2f0aa 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -189,7 +189,7 @@ serde_yaml = "0.8.26" static_assertions = "1.1.0" strum_macros = "0.25.2" sys-info = "0.9.1" -thiserror = "1.0.47" +thiserror = "1.0.48" tokio = { version = "1.32.0", features = ["full"] } tokio-stream = { version = "0.1.14", features = ["sync", "net"] } tokio-util = { version = "0.7.8", features = ["net", "codec", "time"] } @@ -231,7 +231,7 @@ tokio-rustls = "0.24.1" http-serde = "1.1.3" hmac = "0.12.1" parking_lot = "0.12.1" -memchr = "2.6.2" +memchr = "2.6.3" brotli = "3.3.4" zstd = "0.12.4" zstd-safe = "6.0.6" @@ -265,7 +265,7 @@ futures-test = "0.3.28" insta = { version = "1.31.0", features = ["json", "redactions", "yaml"] } introspector-gadget = "0.2.2" maplit = "1.0.2" -memchr = { version = "2.6.2", default-features = false } +memchr = { version = "2.6.3", default-features = false } mockall = "0.11.4" once_cell = "1.18.0" p256 = "0.12.0" From 2c3a22cb496d36406e244d77ee90d5435e7e5030 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Mon, 4 Sep 2023 16:19:46 +0200 Subject: [PATCH 24/81] small performance improvements for telemetry (#3656) * check span names from a HashSet instead of iterating through an array of strings * move the SpanMetricsExporter to a tracing Layer (#3669) SpanMetricsExporter is an OpenTelemetry Exporter, which means that if it is loaded, then the entire OpenTelemetry handling infrastructure is loaded, and especially the part that allocates data for the entire list of spans. Unfortunately, the SpanMetricsExporter was always loaded, even if we do not export the metrics outside of the router, which means that there's a constant overhead of telemetry even when it is not used. 
This moves the SpanMetricsExporter to a lightweight tracing-subscriber Layer which performs the same busy/idle accounting as OpenTelemetryLayer, then generates the same events as before when the span closes --- .changesets/fix_geal_telemetry_perf.md | 5 + .../metrics/span_metrics_exporter.rs | 191 ++++++++++++------ apollo-router/src/plugins/telemetry/mod.rs | 7 +- apollo-router/src/plugins/telemetry/reload.rs | 12 +- 4 files changed, 142 insertions(+), 73 deletions(-) create mode 100644 .changesets/fix_geal_telemetry_perf.md diff --git a/.changesets/fix_geal_telemetry_perf.md b/.changesets/fix_geal_telemetry_perf.md new file mode 100644 index 0000000000..f20ceee31a --- /dev/null +++ b/.changesets/fix_geal_telemetry_perf.md @@ -0,0 +1,5 @@ +### small performance improvements for telemetry ([PR #3656](https://github.com/apollographql/router/pull/3656)) + +The SpanMetricsExporter, used to report span timings hade a few inefficiencies in the way it recognized spans, and it brought a constant overhead to the router usage, even when telemetry was not configured. It has now been isolated and optimized + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3656 \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs b/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs index 1c2aa6642a..5e6778ab74 100644 --- a/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs +++ b/apollo-router/src/plugins/telemetry/metrics/span_metrics_exporter.rs @@ -1,11 +1,13 @@ -use async_trait::async_trait; -use futures::future::BoxFuture; -use futures::FutureExt; -use opentelemetry::sdk::export::trace::ExportResult; -use opentelemetry::sdk::export::trace::SpanData; -use opentelemetry::sdk::export::trace::SpanExporter; -use opentelemetry::Key; -use opentelemetry::Value; +use std::collections::HashSet; +use std::time::Instant; + +use tracing_core::field::Visit; +use tracing_core::span; +use tracing_core::Field; +use tracing_core::Subscriber; +use tracing_subscriber::layer::Context; +use tracing_subscriber::registry::LookupSpan; +use tracing_subscriber::Layer; use crate::axum_factory::utils::REQUEST_SPAN_NAME; use crate::plugins::telemetry::EXECUTION_SPAN_NAME; @@ -13,69 +15,126 @@ use crate::plugins::telemetry::SUBGRAPH_SPAN_NAME; use crate::plugins::telemetry::SUPERGRAPH_SPAN_NAME; use crate::services::QUERY_PLANNING_SPAN_NAME; -const SPAN_NAMES: &[&str] = &[ - REQUEST_SPAN_NAME, - SUPERGRAPH_SPAN_NAME, - SUBGRAPH_SPAN_NAME, - QUERY_PLANNING_SPAN_NAME, - EXECUTION_SPAN_NAME, -]; - -const BUSY_NS_ATTRIBUTE_NAME: Key = Key::from_static_str("busy_ns"); -const IDLE_NS_ATTRIBUTE_NAME: Key = Key::from_static_str("idle_ns"); -const SUBGRAPH_ATTRIBUTE_NAME: Key = Key::from_static_str("apollo.subgraph.name"); - -#[derive(Debug, Default)] -pub(crate) struct Exporter {} -#[async_trait] -impl SpanExporter for Exporter { - /// Export spans metrics to real metrics - fn export(&mut self, batch: Vec) -> BoxFuture<'static, ExportResult> { - for span in batch - .into_iter() - .filter(|s| SPAN_NAMES.contains(&s.name.as_ref())) - { - let busy = span - .attributes - .get(&BUSY_NS_ATTRIBUTE_NAME) - .and_then(|attr| match attr { - Value::I64(v) => Some(*v), - _ => None, - }) - .unwrap_or_default(); - let idle = span - .attributes - .get(&IDLE_NS_ATTRIBUTE_NAME) - .and_then(|attr| match attr { - Value::I64(v) => Some(*v), - _ => None, - }) - .unwrap_or_default(); - let duration = span - .end_time - 
.duration_since(span.start_time) - .unwrap_or_default() - .as_secs_f64(); +const SUBGRAPH_ATTRIBUTE_NAME: &str = "apollo.subgraph.name"; + +#[derive(Debug)] +pub(crate) struct SpanMetricsLayer { + span_names: HashSet<&'static str>, +} + +impl Default for SpanMetricsLayer { + fn default() -> Self { + Self { + span_names: [ + REQUEST_SPAN_NAME, + SUPERGRAPH_SPAN_NAME, + SUBGRAPH_SPAN_NAME, + QUERY_PLANNING_SPAN_NAME, + EXECUTION_SPAN_NAME, + ] + .into(), + } + } +} + +impl Layer for SpanMetricsLayer +where + S: Subscriber + for<'span> LookupSpan<'span>, +{ + fn on_new_span(&self, attrs: &span::Attributes<'_>, id: &span::Id, ctx: Context<'_, S>) { + let span = ctx.span(id).expect("Span not found, this is a bug"); + let mut extensions = span.extensions_mut(); + + let name = attrs.metadata().name(); + if self.span_names.contains(name) && extensions.get_mut::().is_none() { + let mut timings = Timings::new(); + if name == SUBGRAPH_SPAN_NAME { + attrs.values().record(&mut ValueVisitor { + timings: &mut timings, + }); + } + extensions.insert(Timings::new()); + } + } + + fn on_record(&self, _span: &span::Id, _values: &span::Record<'_>, _ctx: Context<'_, S>) {} + + fn on_close(&self, id: span::Id, ctx: Context<'_, S>) { + let span = ctx.span(&id).expect("Span not found, this is a bug"); + let mut extensions = span.extensions_mut(); + + if let Some(timings) = extensions.get_mut::() { + let duration = timings.start.elapsed().as_secs_f64(); // Convert it in seconds - let idle: f64 = idle as f64 / 1_000_000_000_f64; - let busy: f64 = busy as f64 / 1_000_000_000_f64; - if span.name == SUBGRAPH_SPAN_NAME { - let subgraph_name = span - .attributes - .get(&SUBGRAPH_ATTRIBUTE_NAME) - .map(|name| name.as_str()) - .unwrap_or_default(); - ::tracing::info!(histogram.apollo_router_span = duration, kind = %"duration", span = %span.name, subgraph = %subgraph_name); - ::tracing::info!(histogram.apollo_router_span = idle, kind = %"idle", span = %span.name, subgraph = %subgraph_name); - ::tracing::info!(histogram.apollo_router_span = busy, kind = %"busy", span = %span.name, subgraph = %subgraph_name); + let idle: f64 = timings.idle as f64 / 1_000_000_000_f64; + let busy: f64 = timings.busy as f64 / 1_000_000_000_f64; + let name = span.metadata().name(); + if let Some(subgraph_name) = timings.subgraph.take() { + ::tracing::info!(histogram.apollo_router_span = duration, kind = %"duration", span = %name, subgraph = %subgraph_name); + ::tracing::info!(histogram.apollo_router_span = idle, kind = %"idle", span = %name, subgraph = %subgraph_name); + ::tracing::info!(histogram.apollo_router_span = busy, kind = %"busy", span = %name, subgraph = %subgraph_name); } else { - ::tracing::info!(histogram.apollo_router_span = duration, kind = %"duration", span = %span.name); - ::tracing::info!(histogram.apollo_router_span = idle, kind = %"idle", span = %span.name); - ::tracing::info!(histogram.apollo_router_span = busy, kind = %"busy", span = %span.name); + ::tracing::info!(histogram.apollo_router_span = duration, kind = %"duration", span = %name); + ::tracing::info!(histogram.apollo_router_span = idle, kind = %"idle", span = %name); + ::tracing::info!(histogram.apollo_router_span = busy, kind = %"busy", span = %name); } } + } - async { Ok(()) }.boxed() + fn on_enter(&self, id: &span::Id, ctx: Context<'_, S>) { + let span = ctx.span(id).expect("Span not found, this is a bug"); + let mut extensions = span.extensions_mut(); + + if let Some(timings) = extensions.get_mut::() { + let now = Instant::now(); + timings.idle += (now - 
timings.last).as_nanos() as i64; + timings.last = now; + } + } + + fn on_exit(&self, id: &span::Id, ctx: Context<'_, S>) { + let span = ctx.span(id).expect("Span not found, this is a bug"); + let mut extensions = span.extensions_mut(); + + if let Some(timings) = extensions.get_mut::() { + let now = Instant::now(); + timings.busy += (now - timings.last).as_nanos() as i64; + timings.last = now; + } + } +} + +struct Timings { + idle: i64, + busy: i64, + last: Instant, + start: Instant, + subgraph: Option, +} + +impl Timings { + fn new() -> Self { + Self { + idle: 0, + busy: 0, + last: Instant::now(), + start: Instant::now(), + subgraph: None, + } + } +} + +struct ValueVisitor<'a> { + timings: &'a mut Timings, +} + +impl<'a> Visit for ValueVisitor<'a> { + fn record_debug(&mut self, _field: &Field, _value: &dyn std::fmt::Debug) {} + + fn record_str(&mut self, field: &Field, value: &str) { + if field.name() == SUBGRAPH_ATTRIBUTE_NAME { + self.timings.subgraph = Some(value.to_string()); + } } } diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index b61d5fbf21..f6a8ba5202 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -66,6 +66,7 @@ use self::metrics::AttributesForwardConf; use self::metrics::MetricsAttributesConf; use self::reload::reload_fmt; use self::reload::reload_metrics; +use self::reload::LayeredRegistry; use self::reload::NullFieldFormatter; use self::reload::OPENTELEMETRY_TRACER_HANDLE; use self::tracing::apollo_telemetry::APOLLO_PRIVATE_DURATION_NS; @@ -622,8 +623,6 @@ impl Telemetry { builder = setup_tracing(builder, &tracing_config.datadog, trace_config)?; builder = setup_tracing(builder, &tracing_config.otlp, trace_config)?; builder = setup_tracing(builder, &config.apollo, trace_config)?; - // For metrics - builder = builder.with_simple_exporter(metrics::span_metrics_exporter::Exporter::default()); let tracer_provider = builder.build(); Ok(tracer_provider) @@ -672,10 +671,10 @@ impl Telemetry { dyn Layer< ::tracing_subscriber::layer::Layered< OpenTelemetryLayer< - ::tracing_subscriber::Registry, + LayeredRegistry, ReloadTracer<::opentelemetry::sdk::trace::Tracer>, >, - ::tracing_subscriber::Registry, + LayeredRegistry, >, > + Send + Sync, diff --git a/apollo-router/src/plugins/telemetry/reload.rs b/apollo-router/src/plugins/telemetry/reload.rs index 1c66ccf4ef..50ce48747a 100644 --- a/apollo-router/src/plugins/telemetry/reload.rs +++ b/apollo-router/src/plugins/telemetry/reload.rs @@ -15,6 +15,7 @@ use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; use tracing_subscriber::Registry; +use super::metrics::span_metrics_exporter::SpanMetricsLayer; use crate::plugins::telemetry::formatters::filter_metric_events; use crate::plugins::telemetry::formatters::text::TextFormatter; use crate::plugins::telemetry::formatters::FilteringFormatter; @@ -22,7 +23,10 @@ use crate::plugins::telemetry::metrics; use crate::plugins::telemetry::metrics::layer::MetricsLayer; use crate::plugins::telemetry::tracing::reload::ReloadTracer; -type LayeredTracer = Layered>, Registry>; +pub(crate) type LayeredRegistry = Layered; + +type LayeredTracer = + Layered>, LayeredRegistry>; // These handles allow hot tracing of layers. They have complex type definitions because tracing has // generic types in the layer definition. 
@@ -96,6 +100,7 @@ pub(crate) fn init_telemetry(log_level: &str) -> Result<()> { // Env filter is separate because of https://github.com/tokio-rs/tracing/issues/1629 // the tracing registry is only created once tracing_subscriber::registry() + .with(SpanMetricsLayer::default()) .with(opentelemetry_layer) .with(fmt_layer) .with(metrics_layer) @@ -128,8 +133,9 @@ pub(super) fn reload_metrics(layer: MetricsLayer) { #[allow(clippy::type_complexity)] pub(super) fn reload_fmt( layer: Box< - dyn Layer>, Registry>> - + Send + dyn Layer< + Layered>, LayeredRegistry>, + > + Send + Sync, >, ) { From 35be1cce31f6f1d3475b6a7843c89c68a5750947 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Mon, 4 Sep 2023 16:29:51 +0200 Subject: [PATCH 25/81] remove clones from the header plugin (#3721) Fix #3068 the operations were cloned for every subgraph query, this is a bit wasteful so we create them only once, and store them under an Arc --- ..._geal_remove_clones_from_headers_plugin.md | 5 + apollo-router/src/plugins/headers.rs | 132 ++++++++++-------- 2 files changed, 82 insertions(+), 55 deletions(-) create mode 100644 .changesets/fix_geal_remove_clones_from_headers_plugin.md diff --git a/.changesets/fix_geal_remove_clones_from_headers_plugin.md b/.changesets/fix_geal_remove_clones_from_headers_plugin.md new file mode 100644 index 0000000000..36881a1de4 --- /dev/null +++ b/.changesets/fix_geal_remove_clones_from_headers_plugin.md @@ -0,0 +1,5 @@ +### remove clones from the header plugin ([Issue #3068](https://github.com/apollographql/router/issues/3068)) + +The list of header operations was cloned for every subgraph query, and this was increasing latency. We made sure the overhead is minimal by removing those allocations + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3721 \ No newline at end of file diff --git a/apollo-router/src/plugins/headers.rs b/apollo-router/src/plugins/headers.rs index 7a6e56d4ac..074cafd5e8 100644 --- a/apollo-router/src/plugins/headers.rs +++ b/apollo-router/src/plugins/headers.rs @@ -1,4 +1,5 @@ use std::collections::HashMap; +use std::sync::Arc; use std::task::Context; use std::task::Poll; @@ -187,7 +188,8 @@ struct Config { } struct Headers { - config: Config, + all_operations: Arc>, + subgraph_operations: HashMap>>, } #[async_trait::async_trait] @@ -195,36 +197,48 @@ impl Plugin for Headers { type Config = Config; async fn new(init: PluginInit) -> Result { - Ok(Headers { - config: init.config, - }) - } - fn subgraph_service(&self, name: &str, service: subgraph::BoxService) -> subgraph::BoxService { - let mut operations: Vec = self + let operations: Vec = init .config .all .as_ref() .map(|a| a.request.clone()) .unwrap_or_default(); - if let Some(mut subgraph_operations) = - self.config.subgraphs.get(name).map(|s| s.request.clone()) - { - operations.append(&mut subgraph_operations); - } + let subgraph_operations = init + .config + .subgraphs + .iter() + .map(|(subgraph_name, op)| { + let mut operations = operations.clone(); + operations.append(&mut op.request.clone()); + (subgraph_name.clone(), Arc::new(operations)) + }) + .collect(); + Ok(Headers { + all_operations: Arc::new(operations), + subgraph_operations, + }) + } + + fn subgraph_service(&self, name: &str, service: subgraph::BoxService) -> subgraph::BoxService { ServiceBuilder::new() - .layer(HeadersLayer::new(operations)) + .layer(HeadersLayer::new( + self.subgraph_operations + .get(name) + .cloned() + .unwrap_or_else(|| self.all_operations.clone()), + )) .service(service) .boxed() 
} } struct HeadersLayer { - operations: Vec, + operations: Arc>, } impl HeadersLayer { - fn new(operations: Vec) -> Self { + fn new(operations: Arc>) -> Self { Self { operations } } } @@ -241,7 +255,7 @@ impl Layer for HeadersLayer { } struct HeadersService { inner: S, - operations: Vec, + operations: Arc>, } lazy_static! { @@ -279,7 +293,7 @@ where } fn call(&mut self, mut req: SubgraphRequest) -> Self::Future { - for operation in &self.operations { + for operation in &*self.operations { match operation { Operation::Insert(insert_config) => match insert_config { Insert::Static(static_insert) => { @@ -523,12 +537,13 @@ mod test { }) .returning(example_response); - let mut service = - HeadersLayer::new(vec![Operation::Insert(Insert::Static(InsertStatic { + let mut service = HeadersLayer::new(Arc::new(vec![Operation::Insert(Insert::Static( + InsertStatic { name: "c".try_into()?, value: "d".try_into()?, - }))]) - .layer(mock); + }, + ))])) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -549,12 +564,12 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(vec![Operation::Insert(Insert::FromContext( - InsertFromContext { + let mut service = HeadersLayer::new(Arc::new(vec![Operation::Insert( + Insert::FromContext(InsertFromContext { name: "header_from_context".try_into()?, from_context: "my_key".to_string(), - }, - ))]) + }), + )])) .layer(mock); service.ready().await?.call(example_request()).await?; @@ -576,13 +591,14 @@ mod test { }) .returning(example_response); - let mut service = - HeadersLayer::new(vec![Operation::Insert(Insert::FromBody(InsertFromBody { + let mut service = HeadersLayer::new(Arc::new(vec![Operation::Insert(Insert::FromBody( + InsertFromBody { name: "header_from_request".try_into()?, path: JSONQuery::parse(".operationName")?, default: None, - }))]) - .layer(mock); + }, + ))])) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -596,8 +612,10 @@ mod test { .withf(|request| request.assert_headers(vec![("ac", "vac"), ("ab", "vab")])) .returning(example_response); - let mut service = - HeadersLayer::new(vec![Operation::Remove(Remove::Named("aa".try_into()?))]).layer(mock); + let mut service = HeadersLayer::new(Arc::new(vec![Operation::Remove(Remove::Named( + "aa".try_into()?, + ))])) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -611,9 +629,9 @@ mod test { .withf(|request| request.assert_headers(vec![("ac", "vac")])) .returning(example_response); - let mut service = HeadersLayer::new(vec![Operation::Remove(Remove::Matching( + let mut service = HeadersLayer::new(Arc::new(vec![Operation::Remove(Remove::Matching( Regex::from_str("a[ab]")?, - ))]) + ))])) .layer(mock); service.ready().await?.call(example_request()).await?; @@ -636,10 +654,11 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(vec![Operation::Propagate(Propagate::Matching { - matching: Regex::from_str("d[ab]")?, - })]) - .layer(mock); + let mut service = + HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Matching { + matching: Regex::from_str("d[ab]")?, + })])) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -660,12 +679,13 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(vec![Operation::Propagate(Propagate::Named { - named: "da".try_into()?, - rename: None, - default: None, - })]) - .layer(mock); + let mut service = + 
HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Named { + named: "da".try_into()?, + rename: None, + default: None, + })])) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -686,12 +706,13 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(vec![Operation::Propagate(Propagate::Named { - named: "da".try_into()?, - rename: Some("ea".try_into()?), - default: None, - })]) - .layer(mock); + let mut service = + HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Named { + named: "da".try_into()?, + rename: Some("ea".try_into()?), + default: None, + })])) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) @@ -712,12 +733,13 @@ mod test { }) .returning(example_response); - let mut service = HeadersLayer::new(vec![Operation::Propagate(Propagate::Named { - named: "ea".try_into()?, - rename: None, - default: Some("defaulted".try_into()?), - })]) - .layer(mock); + let mut service = + HeadersLayer::new(Arc::new(vec![Operation::Propagate(Propagate::Named { + named: "ea".try_into()?, + rename: None, + default: Some("defaulted".try_into()?), + })])) + .layer(mock); service.ready().await?.call(example_request()).await?; Ok(()) From 058322cd3d6f5d724a1eb26aa2169351a8a905ec Mon Sep 17 00:00:00 2001 From: Avery Harnish Date: Mon, 4 Sep 2023 09:37:04 -0500 Subject: [PATCH 26/81] chore: removes unused introspector-gadget crate (#3709) This PR removes the `introspector-gadget` dependency from the `Cargo.toml` as I don't believe it's used by this crate anymore. If this passes I think I'd like to deprecate that crate and move it fully back into the Rover repository so it's easier to make changes to its implementation. --- Cargo.lock | 43 ++-------------------------------------- apollo-router/Cargo.toml | 1 - 2 files changed, 2 insertions(+), 42 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fc9568d242..aa06341378 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -230,16 +230,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "apollo-encoder" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a67cf5282faa8a66c848e1f4aef139c3cfe307025029983d05b80f8360f41e8" -dependencies = [ - "apollo-parser 0.5.3", - "thiserror", -] - [[package]] name = "apollo-encoder" version = "0.7.0" @@ -278,7 +268,7 @@ dependencies = [ "access-json", "anyhow", "apollo-compiler 0.11.1", - "apollo-encoder 0.7.0", + "apollo-encoder", "apollo-parser 0.6.1", "arc-swap", "askama", @@ -322,7 +312,6 @@ dependencies = [ "hyper-rustls", "indexmap 2.0.0", "insta", - "introspector-gadget", "itertools 0.11.0", "jsonpath-rust", "jsonpath_lib", @@ -453,7 +442,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13ed94bb9de9f24da12ca2122b8eeaa7484d17b090dc84daaaba6b6ac2bee89b" dependencies = [ - "apollo-encoder 0.7.0", + "apollo-encoder", "apollo-parser 0.6.1", "arbitrary", "once_cell", @@ -1005,17 +994,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "backoff" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" -dependencies = [ - "getrandom 0.2.10", - "instant", - "rand 0.8.5", -] - [[package]] name = "backtrace" version = "0.3.68" @@ -3324,23 +3302,6 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" 
-[[package]] -name = "introspector-gadget" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f07e629cb6382fac6e1fb1560123f81438556e6fe4219fec939ad5ff4345d9fa" -dependencies = [ - "apollo-encoder 0.5.1", - "backoff", - "graphql_client", - "hyper", - "reqwest", - "serde", - "serde_json", - "thiserror", - "tracing", -] - [[package]] name = "inventory" version = "0.2.3" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 1334b2f0aa..8c70c8c4d4 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -263,7 +263,6 @@ ecdsa = { version = "0.15.1", features = ["signing", "pem", "pkcs8"] } fred = { version = "6.3.1", features = ["enable-rustls", "no-client-setname"] } futures-test = "0.3.28" insta = { version = "1.31.0", features = ["json", "redactions", "yaml"] } -introspector-gadget = "0.2.2" maplit = "1.0.2" memchr = { version = "2.6.3", default-features = false } mockall = "0.11.4" From 0d30b4713bedbe083791cd753b3f5d66b7b1ebf0 Mon Sep 17 00:00:00 2001 From: Maria Elisabeth Schreiber Date: Tue, 29 Aug 2023 23:00:38 -0600 Subject: [PATCH 27/81] docs: add persisted query configuration options info (#3652) --- docs/source/configuration/overview.mdx | 6 ++ .../configuration/persisted-queries.mdx | 64 ++++++++++++++++++- 2 files changed, 68 insertions(+), 2 deletions(-) diff --git a/docs/source/configuration/overview.mdx b/docs/source/configuration/overview.mdx index 212f3fe467..b68ecb664d 100644 --- a/docs/source/configuration/overview.mdx +++ b/docs/source/configuration/overview.mdx @@ -491,6 +491,12 @@ You can configure certain caching behaviors for generated query plans and APQ (b **If you have a GraphOS Enterprise plan,** you can also configure a Redis-backed _distributed_ cache that enables multiple router instances to share cached values. For details, see [Distributed caching in the Apollo Router](./distributed-caching/). +### Safelisting with persisted queries + +You can enhance your graph's security by maintaining a persisted query list (PQL), an operation safelist made by your first-party apps. As opposed to automatic persisted queries (APQ) where operations are automatically cached, operations must be preregistered to the PQL. Once configured, the router checks incoming requests against the PQL. + +See [Safelisting with persisted queries](./persisted-queries) for more information. + ### HTTP header rules See [Sending HTTP headers to subgraphs](./header-propagation/). diff --git a/docs/source/configuration/persisted-queries.mdx b/docs/source/configuration/persisted-queries.mdx index 6250845ad2..306b970b2d 100644 --- a/docs/source/configuration/persisted-queries.mdx +++ b/docs/source/configuration/persisted-queries.mdx @@ -6,7 +6,7 @@ minVersion: 1.25.0 - + @@ -34,4 +34,64 @@ For more information on other configuration aspects, see the [GraphOS persisted ### Router security levels - \ No newline at end of file + + +### Configuration options + +The router provides four configuration options that you can combine to create the recommended [security levels](#router-security-levels). This section details each configuration option. Refer to the [security levels](#router-security-levels) section for recommended combinations. + +#### `preview_persisted_queries` + +This base configuration enables the feature. All other configuration options build off this one. 
+ +```yaml title="router.yaml" +preview_persisted_queries: + enabled: true +``` + +#### `log_unknown` + +Adding `log_unknown: true` to `preview_persisted_queries` configures the router to log any incoming operations not preregistered to the PQL. + +```yaml title="router.yaml" +preview_persisted_queries: + enabled: true + log_unknown: true +``` + +If used with the [`safelist`](#safelist) option, the router logs unregistered and rejected operations. With [`safelist.require_id`](#require_id) off, the only rejected operations are unregistered ones. If [`safelist.require_id`](#require_id) is turned on, operations can be rejected even when preregistered because they use full operation strings rather than operation IDs. + +#### `safelist` + +Adding `safelist: true` to `preview_persisted_queries` causes the router to reject any operations that haven't been preregistered to your PQL. + +```yaml title="router.yaml" +preview_persisted_queries: + enabled: true + safelist: + enabled: true +apq: + enabled: false +``` + +> **Note:** To enable safelisting, you _must_ turn off [automatic persisted queries](./in-memory-caching#caching-automatic-persisted-queries-apq) (APQs). APQs let clients [register arbitrary operations at runtime](/graphos/operations/persisted-queries/#differences-from-automatic-persisted-queries) while safelisting restricts operations to those that have been explicitly preregistered. + +By default, the [`require_id`](#require_id) suboption is `false`, meaning the router accepts both operation IDs and operation strings as long as the operation is preregistered. + +#### `require_id` + +Adding `require_id: true` to the `safelist` option causes the router to reject any operations that either: +- haven't been preregistered to your PQL +- use a full operation string rather than the operation ID + +```yaml title="router.yaml" +preview_persisted_queries: + enabled: true + safelist: + enabled: true + require_id: true +apq: + enabled: false +``` + +> **Note:** To enable safelisting, you _must_ turn off [automatic persisted queries](./in-memory-caching#caching-automatic-persisted-queries-apq) (APQs). APQs let clients [register arbitrary operations at runtime](/graphos/operations/persisted-queries/#differences-from-automatic-persisted-queries) while safelisting restricts operations to those that have been explicitly preregistered. 
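These options can also be combined. As a minimal sketch that only combines the keys documented above (not an exhaustive or authoritative configuration), a deployment that enforces the safelist while still logging any unregistered operations might look like this:

```yaml title="router.yaml"
# Enforce the PQL safelist and log operations that are not preregistered.
preview_persisted_queries:
  enabled: true
  log_unknown: true
  safelist:
    enabled: true
# Safelisting requires automatic persisted queries to be disabled.
apq:
  enabled: false
```

With this combination, the router rejects operations that aren't in the PQL and also logs them, which helps identify clients whose operations still need to be preregistered.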
From 0402f7a89d6ea7050d54dab9aad64a9bc2da30a4 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Fri, 1 Sep 2023 14:21:54 +0000 Subject: [PATCH 28/81] prep release: v1.29.0-rc.0 --- Cargo.lock | 6 +- apollo-router-benchmarks/Cargo.toml | 2 +- apollo-router-scaffold/Cargo.toml | 2 +- .../templates/base/Cargo.toml | 2 +- .../templates/base/xtask/Cargo.toml | 2 +- apollo-router/Cargo.toml | 2 +- .../tracing/docker-compose.datadog.yml | 2 +- dockerfiles/tracing/docker-compose.jaeger.yml | 2 +- dockerfiles/tracing/docker-compose.zipkin.yml | 2 +- docs/source/containerization/docker.mdx | 2 +- docs/source/containerization/kubernetes.mdx | 28 +- helm/chart/router/Chart.yaml | 4 +- helm/chart/router/README.md | 6 +- licenses.html | 261 ++++++++++++++---- scripts/install.sh | 2 +- 15 files changed, 242 insertions(+), 83 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aa06341378..a0419cdea5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -263,7 +263,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.28.1" +version = "1.29.0-rc.0" dependencies = [ "access-json", "anyhow", @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.28.1" +version = "1.29.0-rc.0" dependencies = [ "apollo-parser 0.6.1", "apollo-router", @@ -424,7 +424,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.28.1" +version = "1.29.0-rc.0" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index b0c793e0d6..f14ad11397 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.28.1" +version = "1.29.0-rc.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index c738ea1f2f..840060268d 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.28.1" +version = "1.29.0-rc.0" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index b58b407973..5c4b295961 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.28.1" +apollo-router = "1.29.0-rc.0" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index 3d544fbc12..0fed2cfce8 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.28.1" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.29.0-rc.0" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 8c70c8c4d4..aafb0e33e0 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.28.1" +version = "1.29.0-rc.0" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index 6f851e23b3..6863342529 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.28.1 + image: ghcr.io/apollographql/router:v1.29.0-rc.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index ef501fff3c..162b8dd19d 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.28.1 + image: ghcr.io/apollographql/router:v1.29.0-rc.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index 73c033766b..e1f95e15db 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.28.1 + image: ghcr.io/apollographql/router:v1.29.0-rc.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/docs/source/containerization/docker.mdx b/docs/source/containerization/docker.mdx index 
b3c78c5442..7e59ab5dde 100644 --- a/docs/source/containerization/docker.mdx +++ b/docs/source/containerization/docker.mdx @@ -11,7 +11,7 @@ The default behaviour of the router images is suitable for a quickstart or devel Note: The [docker documentation](https://docs.docker.com/engine/reference/run/) for the run command may be helpful when reading through the examples. -Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.28.1` +Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.29.0-rc.0` ## Override the configuration diff --git a/docs/source/containerization/kubernetes.mdx b/docs/source/containerization/kubernetes.mdx index d4faa36415..a97a2ef428 100644 --- a/docs/source/containerization/kubernetes.mdx +++ b/docs/source/containerization/kubernetes.mdx @@ -13,7 +13,7 @@ import { Link } from 'gatsby'; [Helm](https://helm.sh) is the package manager for kubernetes. -There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.28.1/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes. +There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.29.0-rc.0/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes. In both the following examples, we are using helm to install the router: - into namespace "router-deploy" (create namespace if it doesn't exist) @@ -64,10 +64,10 @@ kind: ServiceAccount metadata: name: release-name-router labels: - helm.sh/chart: router-1.28.1 + helm.sh/chart: router-1.29.0-rc.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.28.1" + app.kubernetes.io/version: "v1.29.0-rc.0" app.kubernetes.io/managed-by: Helm --- # Source: router/templates/secret.yaml @@ -76,10 +76,10 @@ kind: Secret metadata: name: "release-name-router" labels: - helm.sh/chart: router-1.28.1 + helm.sh/chart: router-1.29.0-rc.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.28.1" + app.kubernetes.io/version: "v1.29.0-rc.0" app.kubernetes.io/managed-by: Helm data: managedFederationApiKey: "UkVEQUNURUQ=" @@ -90,10 +90,10 @@ kind: ConfigMap metadata: name: release-name-router labels: - helm.sh/chart: router-1.28.1 + helm.sh/chart: router-1.29.0-rc.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.28.1" + app.kubernetes.io/version: "v1.29.0-rc.0" app.kubernetes.io/managed-by: Helm data: configuration.yaml: | @@ -117,10 +117,10 @@ kind: Service metadata: name: release-name-router labels: - helm.sh/chart: router-1.28.1 + helm.sh/chart: router-1.29.0-rc.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.28.1" + app.kubernetes.io/version: "v1.29.0-rc.0" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -143,10 +143,10 @@ kind: Deployment metadata: name: release-name-router labels: - helm.sh/chart: router-1.28.1 + helm.sh/chart: router-1.29.0-rc.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.28.1" + app.kubernetes.io/version: "v1.29.0-rc.0" app.kubernetes.io/managed-by: Helm annotations: @@ -174,7 +174,7 @@ spec: - name: router 
securityContext: {} - image: "ghcr.io/apollographql/router:v1.28.1" + image: "ghcr.io/apollographql/router:v1.29.0-rc.0" imagePullPolicy: IfNotPresent args: - --hot-reload @@ -226,10 +226,10 @@ kind: Pod metadata: name: "release-name-router-test-connection" labels: - helm.sh/chart: router-1.28.1 + helm.sh/chart: router-1.29.0-rc.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.28.1" + app.kubernetes.io/version: "v1.29.0-rc.0" app.kubernetes.io/managed-by: Helm annotations: "helm.sh/hook": test diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index db9763c360..62cc273e96 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.28.1 +version: 1.29.0-rc.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "v1.28.1" \ No newline at end of file +appVersion: "v1.29.0-rc.0" \ No newline at end of file diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index cf7b386c04..7fb948a1a7 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.28.1](https://img.shields.io/badge/Version-1.28.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.28.1](https://img.shields.io/badge/AppVersion-v1.28.1-informational?style=flat-square) +![Version: 1.29.0-rc.0](https://img.shields.io/badge/Version-1.29.0--rc.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.29.0-rc.0](https://img.shields.io/badge/AppVersion-v1.29.0--rc.0-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.28.1 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.29.0-rc.0 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.28.1 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.28.1 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.29.0-rc.0 --values my-values.yaml ``` _See [configuration](#configuration) below._ diff --git a/licenses.html b/licenses.html index 687cddf030..4328bb758a 100644 --- a/licenses.html +++ b/licenses.html @@ -44,8 +44,8 @@

Third Party Licenses

Overview of licenses:

    -
  • MIT License (95)
  • -
  • Apache License 2.0 (58)
  • +
  • MIT License (96)
  • +
  • Apache License 2.0 (62)
  • BSD 3-Clause "New" or "Revised" License (9)
  • ISC License (9)
  • Creative Commons Zero v1.0 Universal (2)
  • @@ -260,7 +260,6 @@

    Used by:

    Apache License 2.0

    Used by:

    MIT OR Apache-2.0
    +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    MIT or Apache-2.0
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    The Apache License, Version 2.0 (Apache-2.0)
    +
    +Copyright 2015-2020 the fiat-crypto authors (see the AUTHORS file)
    +
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +
    +
  • BSD 2-Clause "Simplified" License

    Used by:

    @@ -11298,41 +11365,6 @@

    Used by:

    2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -
  • -
  • -

    BSD 3-Clause "New" or "Revised" License

    -

    Used by:

    - -
    // Copyright 2015 The Chromium Authors. All rights reserved.
    -//
    -// Redistribution and use in source and binary forms, with or without
    -// modification, are permitted provided that the following conditions are
    -// met:
    -//
    -//    * Redistributions of source code must retain the above copyright
    -// notice, this list of conditions and the following disclaimer.
    -//    * Redistributions in binary form must reproduce the above
    -// copyright notice, this list of conditions and the following disclaimer
    -// in the documentation and/or other materials provided with the
    -// distribution.
    -//    * Neither the name of Google Inc. nor the names of its
    -// contributors may be used to endorse or promote products derived from
    -// this software without specific prior written permission.
    -//
    -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     
  • @@ -11467,6 +11499,78 @@

    Used by:

    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +
  • +
  • +

    BSD 3-Clause "New" or "Revised" License

    +

    Used by:

    + +
    Copyright (c) 2016-2019 Isis Agora Lovecruft, Henry de Valence. All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +1. Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +
    +2. Redistributions in binary form must reproduce the above copyright
    +notice, this list of conditions and the following disclaimer in the
    +documentation and/or other materials provided with the distribution.
    +
    +3. Neither the name of the copyright holder nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
    +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
    +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
    +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
    +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
    +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
    +
    +========================================================================
    +
    +Portions of curve25519-dalek were originally derived from Adam Langley's
    +Go ed25519 implementation, found at <https://github.com/agl/ed25519/>,
    +under the following licence:
    +
    +========================================================================
    +
    +Copyright (c) 2012 The Go Authors. All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +   * Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +   * Redistributions in binary form must reproduce the above
    +copyright notice, this list of conditions and the following disclaimer
    +in the documentation and/or other materials provided with the
    +distribution.
    +   * Neither the name of Google Inc. nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
    +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
    +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
    +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
    +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
    +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
    +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
    +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     
    @@ -11547,7 +11651,6 @@

    Used by:

    Copyright (c) <year> <owner>. 
     
    @@ -12171,7 +12274,6 @@ 

    Used by:

    ISC License

    Used by:

    Except as otherwise noted, this project is licensed under the following
    @@ -13331,6 +13433,61 @@ 

    Used by:

    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

    MIT License

    Used by:

    +Copyright (c) 2023 Tokio Contributors
    +
    +Permission is hereby granted, free of charge, to any
    +person obtaining a copy of this software and associated
    +documentation files (the "Software"), to deal in the
    +Software without restriction, including without
    +limitation the rights to use, copy, modify, merge,
    +publish, distribute, sublicense, and/or sell copies of
    +the Software, and to permit persons to whom the Software
    +is furnished to do so, subject to the following
    +conditions:
    +
    +The above copyright notice and this permission notice
    +shall be included in all copies or substantial portions
    +of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
    +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
    +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
    +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
    +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
    +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
    +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    +DEALINGS IN THE SOFTWARE.
    +
    +The MIT License (MIT)
    +
    +Copyright (c) 2019 Yoshua Wuyts
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +SOFTWARE.
     
    @@ -13643,6 +13800,7 @@

    Used by:

    MIT License

    Used by:

    MIT License
    @@ -13917,7 +14075,6 @@ 

    Used by:

  • jsonschema
  • lazy-regex-proc_macros
  • serde_v8
  • -
  • tokio-macros
  • v8
  • valuable
  • void
    @@ -14894,11 +15051,13 @@

    MIT License

    Used by:

    This project is dual-licensed under the Unlicense and MIT licenses.
     
    diff --git a/scripts/install.sh b/scripts/install.sh
    index 36a23bc679..624b2e9656 100755
    --- a/scripts/install.sh
    +++ b/scripts/install.sh
    @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa
     
     # Router version defined in apollo-router's Cargo.toml
     # Note: Change this line manually during the release steps.
    -PACKAGE_VERSION="v1.28.1"
    +PACKAGE_VERSION="v1.29.0-rc.0"
     
     download_binary() {
         downloader --check
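
    For context on what this constant drives: the installer composes the release asset URL from `BINARY_DOWNLOAD_PREFIX` and `PACKAGE_VERSION` before fetching the binary. A minimal sketch of that flow, assuming a `curl`-style download step — the real helper names and archive layout in `scripts/install.sh` may differ:

    ```bash
    #!/usr/bin/env bash
    set -euo pipefail

    BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/download"
    PACKAGE_VERSION="v1.29.0-rc.0"

    # Hypothetical sketch: fetch and unpack the release archive for this version.
    # The asset name below is an assumption, not necessarily the real one.
    url="${BINARY_DOWNLOAD_PREFIX}/${PACKAGE_VERSION}/router-${PACKAGE_VERSION}-x86_64-unknown-linux-gnu.tar.gz"
    curl -sSfL "$url" | tar -xz

    # Hypothetical path to the extracted binary.
    ./dist/router --version
    ```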
    
    From 9c1b2fa6ecdafef6cc1876681f9f789af156a2fb Mon Sep 17 00:00:00 2001
    From: Jesse Rosenberger 
    Date: Fri, 1 Sep 2023 15:41:52 +0000
    Subject: [PATCH 29/81] prep release: v1.29.0-rc.1
    
    ---
     Cargo.lock                                    |   6 +-
     apollo-router-benchmarks/Cargo.toml           |   2 +-
     apollo-router-scaffold/Cargo.toml             |   2 +-
     .../templates/base/Cargo.toml                 |   2 +-
     .../templates/base/xtask/Cargo.toml           |   2 +-
     apollo-router/Cargo.toml                      |   2 +-
     .../tracing/docker-compose.datadog.yml        |   2 +-
     dockerfiles/tracing/docker-compose.jaeger.yml |   2 +-
     dockerfiles/tracing/docker-compose.zipkin.yml |   2 +-
     docs/source/containerization/docker.mdx       |   2 +-
     docs/source/containerization/kubernetes.mdx   |  28 +-
     helm/chart/router/Chart.yaml                  |   4 +-
     helm/chart/router/README.md                   |   6 +-
     licenses.html                                 | 355 +++---------------
     scripts/install.sh                            |   2 +-
     15 files changed, 80 insertions(+), 339 deletions(-)
    
    diff --git a/Cargo.lock b/Cargo.lock
    index a0419cdea5..b8db8cad63 100644
    --- a/Cargo.lock
    +++ b/Cargo.lock
    @@ -263,7 +263,7 @@ dependencies = [
     
     [[package]]
     name = "apollo-router"
    -version = "1.29.0-rc.0"
    +version = "1.29.0-rc.1"
     dependencies = [
      "access-json",
      "anyhow",
    @@ -408,7 +408,7 @@ dependencies = [
     
     [[package]]
     name = "apollo-router-benchmarks"
    -version = "1.29.0-rc.0"
    +version = "1.29.0-rc.1"
     dependencies = [
      "apollo-parser 0.6.1",
      "apollo-router",
    @@ -424,7 +424,7 @@ dependencies = [
     
     [[package]]
     name = "apollo-router-scaffold"
    -version = "1.29.0-rc.0"
    +version = "1.29.0-rc.1"
     dependencies = [
      "anyhow",
      "cargo-scaffold",
    diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml
    index f14ad11397..f15620564b 100644
    --- a/apollo-router-benchmarks/Cargo.toml
    +++ b/apollo-router-benchmarks/Cargo.toml
    @@ -1,6 +1,6 @@
     [package]
     name = "apollo-router-benchmarks"
    -version = "1.29.0-rc.0"
    +version = "1.29.0-rc.1"
     authors = ["Apollo Graph, Inc. "]
     edition = "2021"
     license = "Elastic-2.0"
    diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml
    index 840060268d..6587e779ab 100644
    --- a/apollo-router-scaffold/Cargo.toml
    +++ b/apollo-router-scaffold/Cargo.toml
    @@ -1,6 +1,6 @@
     [package]
     name = "apollo-router-scaffold"
    -version = "1.29.0-rc.0"
    +version = "1.29.0-rc.1"
     authors = ["Apollo Graph, Inc. "]
     edition = "2021"
     license = "Elastic-2.0"
    diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml
    index 5c4b295961..8a58f32727 100644
    --- a/apollo-router-scaffold/templates/base/Cargo.toml
    +++ b/apollo-router-scaffold/templates/base/Cargo.toml
    @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" }
     apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" }
     {{else}}
     # Note if you update these dependencies then also update xtask/Cargo.toml
    -apollo-router = "1.29.0-rc.0"
    +apollo-router = "1.29.0-rc.1"
     {{/if}}
     {{/if}}
     async-trait = "0.1.52"
    diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml
    index 0fed2cfce8..bac38e6af6 100644
    --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml
    +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml
    @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" }
     {{#if branch}}
     apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" }
     {{else}}
    -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.29.0-rc.0" }
    +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.29.0-rc.1" }
     {{/if}}
     {{/if}}
     anyhow = "1.0.58"
    diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml
    index aafb0e33e0..ef71fad1d0 100644
    --- a/apollo-router/Cargo.toml
    +++ b/apollo-router/Cargo.toml
    @@ -1,6 +1,6 @@
     [package]
     name = "apollo-router"
    -version = "1.29.0-rc.0"
    +version = "1.29.0-rc.1"
     authors = ["Apollo Graph, Inc. "]
     repository = "https://github.com/apollographql/router/"
     documentation = "https://docs.rs/apollo-router"
    diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml
    index 6863342529..c386463caf 100644
    --- a/dockerfiles/tracing/docker-compose.datadog.yml
    +++ b/dockerfiles/tracing/docker-compose.datadog.yml
    @@ -3,7 +3,7 @@ services:
     
       apollo-router:
         container_name: apollo-router
    -    image: ghcr.io/apollographql/router:v1.29.0-rc.0
    +    image: ghcr.io/apollographql/router:v1.29.0-rc.1
         volumes:
           - ./supergraph.graphql:/etc/config/supergraph.graphql
           - ./router/datadog.router.yaml:/etc/config/configuration.yaml
    diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml
    index 162b8dd19d..072e4d3ad5 100644
    --- a/dockerfiles/tracing/docker-compose.jaeger.yml
    +++ b/dockerfiles/tracing/docker-compose.jaeger.yml
    @@ -4,7 +4,7 @@ services:
       apollo-router:
         container_name: apollo-router
         #build: ./router
    -    image: ghcr.io/apollographql/router:v1.29.0-rc.0
    +    image: ghcr.io/apollographql/router:v1.29.0-rc.1
         volumes:
           - ./supergraph.graphql:/etc/config/supergraph.graphql
           - ./router/jaeger.router.yaml:/etc/config/configuration.yaml
    diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml
    index e1f95e15db..c45f63aef3 100644
    --- a/dockerfiles/tracing/docker-compose.zipkin.yml
    +++ b/dockerfiles/tracing/docker-compose.zipkin.yml
    @@ -4,7 +4,7 @@ services:
       apollo-router:
         container_name: apollo-router
         build: ./router
    -    image: ghcr.io/apollographql/router:v1.29.0-rc.0
    +    image: ghcr.io/apollographql/router:v1.29.0-rc.1
         volumes:
           - ./supergraph.graphql:/etc/config/supergraph.graphql
           - ./router/zipkin.router.yaml:/etc/config/configuration.yaml
    diff --git a/docs/source/containerization/docker.mdx b/docs/source/containerization/docker.mdx
    index 7e59ab5dde..6eca789cde 100644
    --- a/docs/source/containerization/docker.mdx
    +++ b/docs/source/containerization/docker.mdx
    @@ -11,7 +11,7 @@ The default behaviour of the router images is suitable for a quickstart or devel
     
     Note: The [docker documentation](https://docs.docker.com/engine/reference/run/) for the run command may be helpful when reading through the examples.
     
    -Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.29.0-rc.0`
    +Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.29.0-rc.1`
     
     ## Override the configuration
     
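
    Concretely, the updated note means a reader following that page would substitute the new tag into the documented run command. A minimal sketch, assuming the managed-federation quickstart shape with `APOLLO_KEY` and `APOLLO_GRAPH_REF` supplied from the environment — the exact flags shown in docker.mdx may differ:

    ```bash
    # Hypothetical invocation using the freshly tagged image; flags are illustrative.
    docker run -p 4000:4000 \
      --env APOLLO_KEY \
      --env APOLLO_GRAPH_REF \
      --rm ghcr.io/apollographql/router:v1.29.0-rc.1
    ```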
    diff --git a/docs/source/containerization/kubernetes.mdx b/docs/source/containerization/kubernetes.mdx
    index a97a2ef428..126b6677b9 100644
    --- a/docs/source/containerization/kubernetes.mdx
    +++ b/docs/source/containerization/kubernetes.mdx
    @@ -13,7 +13,7 @@ import { Link } from 'gatsby';
     
     [Helm](https://helm.sh) is the package manager for kubernetes.
     
    -There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.29.0-rc.0/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes.
    +There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.29.0-rc.1/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes.
     
     In both the following examples, we are using helm to install the router:
      - into namespace "router-deploy" (create namespace if it doesn't exist)
    @@ -64,10 +64,10 @@ kind: ServiceAccount
     metadata:
       name: release-name-router
       labels:
    -    helm.sh/chart: router-1.29.0-rc.0
    +    helm.sh/chart: router-1.29.0-rc.1
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.29.0-rc.0"
    +    app.kubernetes.io/version: "v1.29.0-rc.1"
         app.kubernetes.io/managed-by: Helm
     ---
     # Source: router/templates/secret.yaml
    @@ -76,10 +76,10 @@ kind: Secret
     metadata:
       name: "release-name-router"
       labels:
    -    helm.sh/chart: router-1.29.0-rc.0
    +    helm.sh/chart: router-1.29.0-rc.1
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.29.0-rc.0"
    +    app.kubernetes.io/version: "v1.29.0-rc.1"
         app.kubernetes.io/managed-by: Helm
     data:
       managedFederationApiKey: "UkVEQUNURUQ="
    @@ -90,10 +90,10 @@ kind: ConfigMap
     metadata:
       name: release-name-router
       labels:
    -    helm.sh/chart: router-1.29.0-rc.0
    +    helm.sh/chart: router-1.29.0-rc.1
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.29.0-rc.0"
    +    app.kubernetes.io/version: "v1.29.0-rc.1"
         app.kubernetes.io/managed-by: Helm
     data:
       configuration.yaml: |
    @@ -117,10 +117,10 @@ kind: Service
     metadata:
       name: release-name-router
       labels:
    -    helm.sh/chart: router-1.29.0-rc.0
    +    helm.sh/chart: router-1.29.0-rc.1
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.29.0-rc.0"
    +    app.kubernetes.io/version: "v1.29.0-rc.1"
         app.kubernetes.io/managed-by: Helm
     spec:
       type: ClusterIP
    @@ -143,10 +143,10 @@ kind: Deployment
     metadata:
       name: release-name-router
       labels:
    -    helm.sh/chart: router-1.29.0-rc.0
    +    helm.sh/chart: router-1.29.0-rc.1
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.29.0-rc.0"
    +    app.kubernetes.io/version: "v1.29.0-rc.1"
         app.kubernetes.io/managed-by: Helm
       
       annotations:
    @@ -174,7 +174,7 @@ spec:
             - name: router
               securityContext:
                 {}
    -          image: "ghcr.io/apollographql/router:v1.29.0-rc.0"
    +          image: "ghcr.io/apollographql/router:v1.29.0-rc.1"
               imagePullPolicy: IfNotPresent
               args:
                 - --hot-reload
    @@ -226,10 +226,10 @@ kind: Pod
     metadata:
       name: "release-name-router-test-connection"
       labels:
    -    helm.sh/chart: router-1.29.0-rc.0
    +    helm.sh/chart: router-1.29.0-rc.1
         app.kubernetes.io/name: router
         app.kubernetes.io/instance: release-name
    -    app.kubernetes.io/version: "v1.29.0-rc.0"
    +    app.kubernetes.io/version: "v1.29.0-rc.1"
         app.kubernetes.io/managed-by: Helm
       annotations:
         "helm.sh/hook": test
    diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml
    index 62cc273e96..d1088de86f 100644
    --- a/helm/chart/router/Chart.yaml
    +++ b/helm/chart/router/Chart.yaml
    @@ -20,10 +20,10 @@ type: application
     # so it matches the shape of our release process and release automation.
     # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix
     # of "v" is not included.
    -version: 1.29.0-rc.0
    +version: 1.29.0-rc.1
     
     # This is the version number of the application being deployed. This version number should be
     # incremented each time you make changes to the application. Versions are not expected to
     # follow Semantic Versioning. They should reflect the version the application is using.
     # It is recommended to use it with quotes.
    -appVersion: "v1.29.0-rc.0"
    \ No newline at end of file
    +appVersion: "v1.29.0-rc.1"
    \ No newline at end of file
    diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md
    index 7fb948a1a7..8803d4a176 100644
    --- a/helm/chart/router/README.md
    +++ b/helm/chart/router/README.md
    @@ -2,7 +2,7 @@
     
     [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation
     
    -![Version: 1.29.0-rc.0](https://img.shields.io/badge/Version-1.29.0--rc.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.29.0-rc.0](https://img.shields.io/badge/AppVersion-v1.29.0--rc.0-informational?style=flat-square)
    +![Version: 1.29.0-rc.1](https://img.shields.io/badge/Version-1.29.0--rc.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.29.0-rc.1](https://img.shields.io/badge/AppVersion-v1.29.0--rc.1-informational?style=flat-square)
     
     ## Prerequisites
     
    @@ -11,7 +11,7 @@
     ## Get Repo Info
     
     ```console
    -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.29.0-rc.0
    +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.29.0-rc.1
     ```
     
     ## Install Chart
    @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.29.0-rc.0
     **Important:** only helm3 is supported
     
     ```console
    -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.29.0-rc.0 --values my-values.yaml
    +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.29.0-rc.1 --values my-values.yaml
     ```
     
     _See [configuration](#configuration) below._
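
    Putting the two docs changes together: the kubernetes.mdx page earlier in this patch describes installing into a `router-deploy` namespace (created if it doesn't exist), and the README above gives the OCI chart reference. A minimal combined sketch, with the release name and values file as placeholders:

    ```bash
    # Hypothetical install matching the docs' description; adjust release name and values.
    helm upgrade --install my-router \
      oci://ghcr.io/apollographql/helm-charts/router \
      --version 1.29.0-rc.1 \
      --create-namespace --namespace router-deploy \
      --values my-values.yaml
    ```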
    diff --git a/licenses.html b/licenses.html
    index 4328bb758a..edc43d8893 100644
    --- a/licenses.html
    +++ b/licenses.html
    @@ -44,10 +44,10 @@ 

    Third Party Licenses

    Overview of licenses:

    Apache License 2.0

    Used by:

    -# Contributing
    -
    -## License
    -
    -Licensed under either of
    -
    - * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
    - * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
    -
    -at your option.
    -
    -### Contribution
    -
    -Unless you explicitly state otherwise, any contribution intentionally submitted
    -for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
    -additional terms or conditions.
    -
    -
    Apache License 2.0

    Used by:

    -../../LICENSE-APACHE
    -

    Apache License 2.0

    Used by:

    -// Licensed under the Apache License, Version 2.0
    -// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
    -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
    -// All files in the project carrying such notice may not be copied, modified, or distributed
    -// except according to those terms.
     
    @@ -10724,7 +10678,6 @@

    Used by:

    Apache License 2.0

    Used by:

    Apache License
    @@ -11144,15 +11097,19 @@ 

    Used by:

    Apache License 2.0

    Used by:

    Apache License 2.0

    Used by:

    -Copyright 2023 The allocator-api2 project developers
    -
    -Licensed under the Apache License, Version 2.0 (the "License");
    -you may not use this file except in compliance with the License.
    -You may obtain a copy of the License at
    -
    -	http://www.apache.org/licenses/LICENSE-2.0
    -
    -Unless required by applicable law or agreed to in writing, software
    -distributed under the License is distributed on an "AS IS" BASIS,
    -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -See the License for the specific language governing permissions and
    -limitations under the License.
    Apache License 2.0

    @@ -11278,65 +11215,6 @@

    Used by:

Copyright [2022] [Bryn Cooke]
 
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
    -

    Apache License 2.0

    Used by:

    -Licensed under the Apache License, Version 2.0
    -<LICENSE-APACHE or
    -http://www.apache.org/licenses/LICENSE-2.0> or the MIT
    -license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
    -at your option. All files in the project carrying such
    -notice may not be copied, modified, or distributed except
    -according to those terms.
    -
    -
    Apache License 2.0

    Used by:

    -MIT OR Apache-2.0
    -

    Apache License 2.0

    Used by:

    -MIT or Apache-2.0
    -
    -

    Apache License 2.0

    Used by:

    -The Apache License, Version 2.0 (Apache-2.0)
    -
    -Copyright 2015-2020 the fiat-crypto authors (see the AUTHORS file)
    -
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
     You may obtain a copy of the License at
    @@ -11499,78 +11377,6 @@ 

    Used by:

    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    -

    BSD 3-Clause "New" or "Revised" License

    Used by:

    -Copyright (c) 2016-2019 Isis Agora Lovecruft, Henry de Valence. All rights reserved.
    -
    -Redistribution and use in source and binary forms, with or without
    -modification, are permitted provided that the following conditions are
    -met:
    -
    -1. Redistributions of source code must retain the above copyright
    -notice, this list of conditions and the following disclaimer.
    -
    -2. Redistributions in binary form must reproduce the above copyright
    -notice, this list of conditions and the following disclaimer in the
    -documentation and/or other materials provided with the distribution.
    -
    -3. Neither the name of the copyright holder nor the names of its
    -contributors may be used to endorse or promote products derived from
    -this software without specific prior written permission.
    -
    -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
    -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
    -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
    -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
    -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
    -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
    -
    -========================================================================
    -
    -Portions of curve25519-dalek were originally derived from Adam Langley's
    -Go ed25519 implementation, found at <https://github.com/agl/ed25519/>,
    -under the following licence:
    -
    -========================================================================
    -
    -Copyright (c) 2012 The Go Authors. All rights reserved.
    -
    -Redistribution and use in source and binary forms, with or without
    -modification, are permitted provided that the following conditions are
    -met:
    -
    -   * Redistributions of source code must retain the above copyright
    -notice, this list of conditions and the following disclaimer.
    -   * Redistributions in binary form must reproduce the above
    -copyright notice, this list of conditions and the following disclaimer
    -in the documentation and/or other materials provided with the
    -distribution.
    -   * Neither the name of Google Inc. nor the names of its
    -contributors may be used to endorse or promote products derived from
    -this software without specific prior written permission.
    -
    -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
    -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
    -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
    -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
    -OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
    -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
    -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
    -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     
    @@ -11651,6 +11457,7 @@

    Used by:

    Copyright (c) <year> <owner>. 
     
    @@ -13433,61 +13240,6 @@ 

    Used by:

    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
    -

    MIT License

    Used by:

    -Copyright (c) 2023 Tokio Contributors
    -
    -Permission is hereby granted, free of charge, to any
    -person obtaining a copy of this software and associated
    -documentation files (the "Software"), to deal in the
    -Software without restriction, including without
    -limitation the rights to use, copy, modify, merge,
    -publish, distribute, sublicense, and/or sell copies of
    -the Software, and to permit persons to whom the Software
    -is furnished to do so, subject to the following
    -conditions:
    -
    -The above copyright notice and this permission notice
    -shall be included in all copies or substantial portions
    -of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
    -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
    -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
    -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
    -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
    -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
    -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    -DEALINGS IN THE SOFTWARE.
    -
    -The MIT License (MIT)
    -
    -Copyright (c) 2019 Yoshua Wuyts
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -SOFTWARE.
     
    @@ -14075,6 +13827,7 @@

    Used by:

  • jsonschema
  • lazy-regex-proc_macros
  • serde_v8
  • +
  • tokio-macros
  • v8
  • valuable
  • void
    @@ -14367,42 +14120,12 @@

    Used by:

    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

    MIT License

    Used by:

    -The MIT License (MIT)
    -
    -Copyright (c) 2014 Mathijs van de Nes
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy
    -of this software and associated documentation files (the "Software"), to deal
    -in the Software without restriction, including without limitation the rights
    -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    -copies of the Software, and to permit persons to whom the Software is
    -furnished to do so, subject to the following conditions:
    -
    -The above copyright notice and this permission notice shall be included in all
    -copies or substantial portions of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    -SOFTWARE.
    -
    MIT License

    Used by:

    The MIT License (MIT)
     
    @@ -15051,13 +14774,7 @@ 

    MIT License

    Used by:

    This project is dual-licensed under the Unlicense and MIT licenses.
     
    @@ -15905,26 +15622,50 @@ 

    Used by:

    UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
     
    -Unicode Data Files include all data files under the directories http://www.unicode.org/Public/, http://www.unicode.org/reports/, http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and http://www.unicode.org/utility/trac/browser/.
    -
    -Unicode Data Files do not include PDF online code charts under the directory http://www.unicode.org/Public/.
    +See Terms of Use <https://www.unicode.org/copyright.html>
    +for definitions of Unicode Inc.’s Data Files and Software.
     
    -Software includes any source code published in the Unicode Standard or under the directories http://www.unicode.org/Public/, http://www.unicode.org/reports/, http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and http://www.unicode.org/utility/trac/browser/.
    -
    -NOTICE TO USER: Carefully read the following legal agreement. BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE THE DATA FILES OR SOFTWARE.
    +NOTICE TO USER: Carefully read the following legal agreement.
    +BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
    +DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
    +YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
    +TERMS AND CONDITIONS OF THIS AGREEMENT.
    +IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
    +THE DATA FILES OR SOFTWARE.
     
     COPYRIGHT AND PERMISSION NOTICE
     
    -Copyright © 1991-2016 Unicode, Inc. All rights reserved. Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
    -
    -Permission is hereby granted, free of charge, to any person obtaining a copy of the Unicode data files and any associated documentation (the "Data Files") or Unicode software and any associated documentation (the "Software") to deal in the Data Files or Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, and/or sell copies of the Data Files or Software, and to permit persons to whom the Data Files or Software are furnished to do so, provided that either
    +Copyright © 1991-2022 Unicode, Inc. All rights reserved.
    +Distributed under the Terms of Use in https://www.unicode.org/copyright.html.
     
    -     (a) this copyright and permission notice appear with all copies of the Data Files or Software, or
    -     (b) this copyright and permission notice appear in associated Documentation.
    -
    -THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THE DATA FILES OR SOFTWARE.
    -
    -Except as contained in this notice, the name of a copyright holder shall not be used in advertising or otherwise to promote the sale, use or other dealings in these Data Files or Software without prior written authorization of the copyright holder.
    +Permission is hereby granted, free of charge, to any person obtaining
    +a copy of the Unicode data files and any associated documentation
    +(the "Data Files") or Unicode software and any associated documentation
    +(the "Software") to deal in the Data Files or Software
    +without restriction, including without limitation the rights to use,
    +copy, modify, merge, publish, distribute, and/or sell copies of
    +the Data Files or Software, and to permit persons to whom the Data Files
    +or Software are furnished to do so, provided that either
    +(a) this copyright and permission notice appear with all copies
    +of the Data Files or Software, or
    +(b) this copyright and permission notice appear in associated
    +Documentation.
    +
    +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
    +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
    +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    +NONINFRINGEMENT OF THIRD PARTY RIGHTS.
    +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
    +NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
    +DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
    +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
    +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
    +PERFORMANCE OF THE DATA FILES OR SOFTWARE.
    +
    +Except as contained in this notice, the name of a copyright holder
    +shall not be used in advertising or otherwise to promote the sale,
    +use or other dealings in these Data Files or Software without prior
    +written authorization of the copyright holder.
     
  • diff --git a/scripts/install.sh b/scripts/install.sh index 624b2e9656..b9ba3c7cf6 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. -PACKAGE_VERSION="v1.29.0-rc.0" +PACKAGE_VERSION="v1.29.0-rc.1" download_binary() { downloader --check From b1bbf972573f91f58b67eb96c78bfa7cfb2cc828 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Mon, 4 Sep 2023 14:37:40 +0200 Subject: [PATCH 30/81] GraphQL response processing must happen under the execution span (#3732) Previously, any event in processing would be reported under the supergraph span, or any plugin span (like rhai) happening in between --- ...fix_geal_fix_execution_span_attribution.md | 5 +++ .../src/services/execution_service.rs | 23 +++++++------ ...acing_tests__traced_basic_composition.snap | 32 +++++++++---------- .../tracing_tests__traced_basic_request.snap | 32 +++++++++---------- 4 files changed, 51 insertions(+), 41 deletions(-) create mode 100644 .changesets/fix_geal_fix_execution_span_attribution.md diff --git a/.changesets/fix_geal_fix_execution_span_attribution.md b/.changesets/fix_geal_fix_execution_span_attribution.md new file mode 100644 index 0000000000..03a11c421a --- /dev/null +++ b/.changesets/fix_geal_fix_execution_span_attribution.md @@ -0,0 +1,5 @@ +### GraphQL response processing must happen under the execution span ([PR #3732](https://github.com/apollographql/router/pull/3732)) + +Previously, any event in processing would be reported under the supergraph span, or any plugin span (like rhai) happening in between + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3732 \ No newline at end of file diff --git a/apollo-router/src/services/execution_service.rs b/apollo-router/src/services/execution_service.rs index 274b215265..5e38fab3a6 100644 --- a/apollo-router/src/services/execution_service.rs +++ b/apollo-router/src/services/execution_service.rs @@ -23,6 +23,7 @@ use tower::ServiceExt; use tower_service::Service; use tracing::event; use tracing::Instrument; +use tracing::Span; use tracing_core::Level; use super::new_service::ServiceFactory; @@ -171,17 +172,21 @@ impl ExecutionService { let schema = self.schema.clone(); let mut nullified_paths: Vec = vec![]; + let execution_span = Span::current(); + let stream = stream .filter_map(move |response: Response| { - ready(Self::process_graphql_response( - &query, - operation_name.as_deref(), - &variables, - is_deferred, - &schema, - &mut nullified_paths, - response, - )) + ready(execution_span.in_scope(|| { + Self::process_graphql_response( + &query, + operation_name.as_deref(), + &variables, + is_deferred, + &schema, + &mut nullified_paths, + response, + ) + })) }) .boxed(); diff --git a/apollo-router/tests/snapshots/tracing_tests__traced_basic_composition.snap b/apollo-router/tests/snapshots/tracing_tests__traced_basic_composition.snap index d729a01f61..8c976a59d0 100644 --- a/apollo-router/tests/snapshots/tracing_tests__traced_basic_composition.snap +++ b/apollo-router/tests/snapshots/tracing_tests__traced_basic_composition.snap @@ -707,24 +707,24 @@ expression: get_spans() } } } + }, + "apollo_router::services::execution_service::format_response": { + "name": "apollo_router::services::execution_service::format_response", + "record": { + "entries": [], + "metadata": { + "name": "format_response", + "target": 
"apollo_router::services::execution_service", + "level": "DEBUG", + "module_path": "apollo_router::services::execution_service", + "fields": { + "names": [] + } + } + }, + "children": {} } } - }, - "apollo_router::services::execution_service::format_response": { - "name": "apollo_router::services::execution_service::format_response", - "record": { - "entries": [], - "metadata": { - "name": "format_response", - "target": "apollo_router::services::execution_service", - "level": "DEBUG", - "module_path": "apollo_router::services::execution_service", - "fields": { - "names": [] - } - } - }, - "children": {} } } } diff --git a/apollo-router/tests/snapshots/tracing_tests__traced_basic_request.snap b/apollo-router/tests/snapshots/tracing_tests__traced_basic_request.snap index 628a15d7a3..e5c11265d4 100644 --- a/apollo-router/tests/snapshots/tracing_tests__traced_basic_request.snap +++ b/apollo-router/tests/snapshots/tracing_tests__traced_basic_request.snap @@ -422,24 +422,24 @@ expression: get_spans() "children": {} } } + }, + "apollo_router::services::execution_service::format_response": { + "name": "apollo_router::services::execution_service::format_response", + "record": { + "entries": [], + "metadata": { + "name": "format_response", + "target": "apollo_router::services::execution_service", + "level": "DEBUG", + "module_path": "apollo_router::services::execution_service", + "fields": { + "names": [] + } + } + }, + "children": {} } } - }, - "apollo_router::services::execution_service::format_response": { - "name": "apollo_router::services::execution_service::format_response", - "record": { - "entries": [], - "metadata": { - "name": "format_response", - "target": "apollo_router::services::execution_service", - "level": "DEBUG", - "module_path": "apollo_router::services::execution_service", - "fields": { - "names": [] - } - } - }, - "children": {} } } } From 17f68e3b8a89a28b742d24e8c2af39fe007db435 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Mon, 4 Sep 2023 16:08:54 +0200 Subject: [PATCH 31/81] prep release: v1.29.0 --- .../feat_geal_authorization_directives.md | 24 - .changesets/fix_bryn_fix_config_metrics.md | 5 - ...fix_geal_fix_execution_span_attribution.md | 5 - .changesets/fix_igni_update_router_bridge.md | 6 - .changesets/maint_bryn_uplink_client.md | 6 - .../maint_geal_authorization_analytics.md | 8 - .../maint_geal_unneeded_schema_parsing.md | 5 - CHANGELOG.md | 61 +++ Cargo.lock | 6 +- apollo-router-benchmarks/Cargo.toml | 2 +- apollo-router-scaffold/Cargo.toml | 2 +- .../templates/base/Cargo.toml | 2 +- .../templates/base/xtask/Cargo.toml | 2 +- apollo-router/Cargo.toml | 2 +- .../tracing/docker-compose.datadog.yml | 2 +- dockerfiles/tracing/docker-compose.jaeger.yml | 2 +- dockerfiles/tracing/docker-compose.zipkin.yml | 2 +- docs/source/containerization/docker.mdx | 2 +- docs/source/containerization/kubernetes.mdx | 28 +- helm/chart/router/Chart.yaml | 4 +- helm/chart/router/README.md | 9 +- licenses.html | 432 ++++++++++++++++-- scripts/install.sh | 2 +- 23 files changed, 475 insertions(+), 144 deletions(-) delete mode 100644 .changesets/feat_geal_authorization_directives.md delete mode 100644 .changesets/fix_bryn_fix_config_metrics.md delete mode 100644 .changesets/fix_geal_fix_execution_span_attribution.md delete mode 100644 .changesets/fix_igni_update_router_bridge.md delete mode 100644 .changesets/maint_bryn_uplink_client.md delete mode 100644 .changesets/maint_geal_authorization_analytics.md delete mode 100644 .changesets/maint_geal_unneeded_schema_parsing.md diff 
--git a/.changesets/feat_geal_authorization_directives.md b/.changesets/feat_geal_authorization_directives.md deleted file mode 100644 index 453076cdc9..0000000000 --- a/.changesets/feat_geal_authorization_directives.md +++ /dev/null @@ -1,24 +0,0 @@ -### GraphOS Enterprise: authorization directives ([PR #3397](https://github.com/apollographql/router/pull/3397), [PR #3662](https://github.com/apollographql/router/pull/3662)) - -We introduce two new directives, `requiresScopes` and `@authenticated`, that define authorization policies for fields and types in the supergraph schema. - -They are defined as follows: - -```graphql -scalar federation__Scope -directive @requiresScopes(scopes: [[federation__Scope!]!]!) on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM - -directive @authenticated on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM -``` - -The implementation hooks into the request lifecycle at multiple steps: -- In query analysis, we extract the list of scopes necessary to authorize the query. -- In a supergraph plugin, we calculate the authorization status and put it in the request context: - - for `@requiresScopes`, this is the intersection of the query's required scopes and the scopes provided in the request token - - for `@authenticated`, it is `is_authenticated` or not -- In the query planning phase, we filter the query to remove unauthorized fields before proceeding with query planning. -- At the subgraph level, if query deduplication is active, the authorization status is used to group queries together. -- At the execution service level, the response is first formatted according to the filtered query, which removed any unauthorized information, then to the shape of the original query, which propagates nulls as needed. -- At the execution service level, errors are added to the response indicating which fields were removed because they were not authorized. - -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3397 https://github.com/apollographql/router/pull/3662 \ No newline at end of file diff --git a/.changesets/fix_bryn_fix_config_metrics.md b/.changesets/fix_bryn_fix_config_metrics.md deleted file mode 100644 index 678fc60b60..0000000000 --- a/.changesets/fix_bryn_fix_config_metrics.md +++ /dev/null @@ -1,5 +0,0 @@ -### Fix config metrics path and test for subscription callbacks ([Issue #3687](https://github.com/apollographql/router/issues/3687)) - -Detection of subscription callbacks has been fixed for internal Apollo metrics. This has no user facing impact. 
- -By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/3688 diff --git a/.changesets/fix_geal_fix_execution_span_attribution.md b/.changesets/fix_geal_fix_execution_span_attribution.md deleted file mode 100644 index 03a11c421a..0000000000 --- a/.changesets/fix_geal_fix_execution_span_attribution.md +++ /dev/null @@ -1,5 +0,0 @@ -### GraphQL response processing must happen under the execution span ([PR #3732](https://github.com/apollographql/router/pull/3732)) - -Previously, any event in processing would be reported under the supergraph span, or any plugin span (like rhai) happening in between - -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3732 \ No newline at end of file diff --git a/.changesets/fix_igni_update_router_bridge.md b/.changesets/fix_igni_update_router_bridge.md deleted file mode 100644 index e3366e004a..0000000000 --- a/.changesets/fix_igni_update_router_bridge.md +++ /dev/null @@ -1,6 +0,0 @@ -### Update deno, so we can generate docs again ([Issue #3305](https://github.com/apollographql/router/issues/3305)) - -Router docs failed to build on crates.io because of a documentation compile error in Deno. -This updates Deno to the latest version, which allows us to generate crates.io documentation again. - -By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3626 diff --git a/.changesets/maint_bryn_uplink_client.md b/.changesets/maint_bryn_uplink_client.md deleted file mode 100644 index 7c9776f106..0000000000 --- a/.changesets/maint_bryn_uplink_client.md +++ /dev/null @@ -1,6 +0,0 @@ -### Uplink connections now reuse reqwest client ([Issue #3333](https://github.com/apollographql/router/issues/3333)) - -Previously uplink requests created a new reqwest client each time, this may cause CPU spikes especially on OSX. -A single client will now be shared between requests of the same type. - -By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/3703 \ No newline at end of file diff --git a/.changesets/maint_geal_authorization_analytics.md b/.changesets/maint_geal_authorization_analytics.md deleted file mode 100644 index 50f235a639..0000000000 --- a/.changesets/maint_geal_authorization_analytics.md +++ /dev/null @@ -1,8 +0,0 @@ -### Add a metric tracking authorization usage ([PR #3660](https://github.com/apollographql/router/pull/3660)) - -The new metric is a counter called `apollo.router.operations.authorization` and contains the following boolean attributes: -- `filtered`: the query has one or more filtered fields -- `requires_scopes`: the query uses fields or types tagged with the `@requiresScopes` directive -- `authenticated`: the query uses fields or types tagged with the `@authenticated` directive - -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3660 \ No newline at end of file diff --git a/.changesets/maint_geal_unneeded_schema_parsing.md b/.changesets/maint_geal_unneeded_schema_parsing.md deleted file mode 100644 index f0ee1ec159..0000000000 --- a/.changesets/maint_geal_unneeded_schema_parsing.md +++ /dev/null @@ -1,5 +0,0 @@ -### Remove unneeded schema parsing steps ([PR #3547](https://github.com/apollographql/router/pull/3547)) - -We need access to a parsed schema in various parts of the router, sometimes before the point where it is actually parsed and integrated with the rest of the configuration, so it was parsed multiple times to mitigate that. 
Some architecture changes made these parsing steps obsolete so they were removed. - -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3547 \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 2895d7444e..24b7c15460 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,67 @@ All notable changes to Router will be documented in this file. This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html). +# [1.29.0] - 2023-09-04 + +## 🚀 Features + +### GraphOS Enterprise: Authorization ([PR #3397](https://github.com/apollographql/router/pull/3397), [PR #3662](https://github.com/apollographql/router/pull/3662)) + +> ⚠️ This is an [Enterprise feature](https://www.apollographql.com/blog/platform/evaluating-apollo-router-understanding-free-and-open-vs-commercial-features/) of the Apollo Router. It requires an organization with a [GraphOS Enterprise plan](https://www.apollographql.com/pricing/). + +If your organization doesn't currently have an Enterprise plan, you can test out this functionality by signing up for a free [Enterprise trial](https://www.apollographql.com/docs/graphos/org/plans/#enterprise-trials). +We introduce two new directives, `@requiresScopes` and `@authenticated`, that define authorization policies for fields and types in the supergraph schema, composed with Federation version 2.5.3 or higher. + +They are defined as follows: + +```graphql +scalar federation__Scope +directive @requiresScopes(scopes: [[federation__Scope!]!]!) on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM +``` + +This directive allows granular access control through user-defined scopes. + +```graphql +directive @authenticated on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM +``` + +This directive allows access to the annotated field or type for authenticated requests only. +For more information on how to use these directives, please read Apollo Router [docs](https://www.apollographql.com/docs/router/configuration/authorization) +``` + + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3397 https://github.com/apollographql/router/pull/3662 + +## 🐛 Fixes + +### Update Deno to resolve Rust Docs generation failure ([Issue #3305](https://github.com/apollographql/router/issues/3305)) + +We've updated to the latest version of Deno (0.200) to fix errors when generating [docs.rs/apollo-router](https://docs.rs/crate/apollo-router/latest). + +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3626 + +### GraphQL response processing is now captured under the execution span ([PR #3732](https://github.com/apollographql/router/pull/3732)) + +Ensure processing is captured under the "execution" span. Previously, events would be reported under the supergraph span or — even more arbitrarily — any plugin's span (e.g., Rhai). + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3732 + +## 🛠 Maintenance + +### Apollo Uplink connections re-use the existing HTTP client ([Issue #3333](https://github.com/apollographql/router/issues/3333)) + +A single HTTP client will now be shared between requests of the same type when making requests to [Apollo Uplink](https://www.apollographql.com/docs/federation/managed-federation/uplink/) to fetch supergraphs, licenses and configuration from Studio. 
Previously, such requests created a new HTTP client on each periodic fetch which occasionally resulted in CPU spikes, especially on macOS. + +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/3703 + +### Remove unneeded schema parsing steps ([PR #3547](https://github.com/apollographql/router/pull/3547)) + +Access to a parsed schema is required in various parts of the Router. Previously were were parsing the schema multiple times, but this is now fixed. + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3547 + + + # [1.28.1] - 2023-08-28 ## 🚀 Features diff --git a/Cargo.lock b/Cargo.lock index b8db8cad63..d02de46940 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -263,7 +263,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.29.0-rc.1" +version = "1.29.0" dependencies = [ "access-json", "anyhow", @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.29.0-rc.1" +version = "1.29.0" dependencies = [ "apollo-parser 0.6.1", "apollo-router", @@ -424,7 +424,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.29.0-rc.1" +version = "1.29.0" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index f15620564b..f9cfc72f03 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.29.0-rc.1" +version = "1.29.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index 6587e779ab..2c006fa9b9 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.29.0-rc.1" +version = "1.29.0" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index 8a58f32727..f327476570 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.29.0-rc.1" +apollo-router = "1.29.0" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index bac38e6af6..167ef3c605 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.29.0-rc.1" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.29.0" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index ef71fad1d0..de24279e81 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.29.0-rc.1" +version = "1.29.0" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index c386463caf..59c3ddf255 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.29.0-rc.1 + image: ghcr.io/apollographql/router:v1.29.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 072e4d3ad5..0de6214836 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.29.0-rc.1 + image: ghcr.io/apollographql/router:v1.29.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index c45f63aef3..60f98be281 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.29.0-rc.1 + image: ghcr.io/apollographql/router:v1.29.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/docs/source/containerization/docker.mdx b/docs/source/containerization/docker.mdx index 
6eca789cde..b985e69b33 100644 --- a/docs/source/containerization/docker.mdx +++ b/docs/source/containerization/docker.mdx @@ -11,7 +11,7 @@ The default behaviour of the router images is suitable for a quickstart or devel Note: The [docker documentation](https://docs.docker.com/engine/reference/run/) for the run command may be helpful when reading through the examples. -Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.29.0-rc.1` +Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.29.0` ## Override the configuration diff --git a/docs/source/containerization/kubernetes.mdx b/docs/source/containerization/kubernetes.mdx index 126b6677b9..35d620c49a 100644 --- a/docs/source/containerization/kubernetes.mdx +++ b/docs/source/containerization/kubernetes.mdx @@ -13,7 +13,7 @@ import { Link } from 'gatsby'; [Helm](https://helm.sh) is the package manager for kubernetes. -There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.29.0-rc.1/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes. +There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.29.0/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes. In both the following examples, we are using helm to install the router: - into namespace "router-deploy" (create namespace if it doesn't exist) @@ -64,10 +64,10 @@ kind: ServiceAccount metadata: name: release-name-router labels: - helm.sh/chart: router-1.29.0-rc.1 + helm.sh/chart: router-1.29.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.0-rc.1" + app.kubernetes.io/version: "v1.29.0" app.kubernetes.io/managed-by: Helm --- # Source: router/templates/secret.yaml @@ -76,10 +76,10 @@ kind: Secret metadata: name: "release-name-router" labels: - helm.sh/chart: router-1.29.0-rc.1 + helm.sh/chart: router-1.29.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.0-rc.1" + app.kubernetes.io/version: "v1.29.0" app.kubernetes.io/managed-by: Helm data: managedFederationApiKey: "UkVEQUNURUQ=" @@ -90,10 +90,10 @@ kind: ConfigMap metadata: name: release-name-router labels: - helm.sh/chart: router-1.29.0-rc.1 + helm.sh/chart: router-1.29.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.0-rc.1" + app.kubernetes.io/version: "v1.29.0" app.kubernetes.io/managed-by: Helm data: configuration.yaml: | @@ -117,10 +117,10 @@ kind: Service metadata: name: release-name-router labels: - helm.sh/chart: router-1.29.0-rc.1 + helm.sh/chart: router-1.29.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.0-rc.1" + app.kubernetes.io/version: "v1.29.0" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -143,10 +143,10 @@ kind: Deployment metadata: name: release-name-router labels: - helm.sh/chart: router-1.29.0-rc.1 + helm.sh/chart: router-1.29.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.0-rc.1" + app.kubernetes.io/version: "v1.29.0" app.kubernetes.io/managed-by: Helm annotations: @@ -174,7 +174,7 @@ spec: - name: router 
securityContext: {} - image: "ghcr.io/apollographql/router:v1.29.0-rc.1" + image: "ghcr.io/apollographql/router:v1.29.0" imagePullPolicy: IfNotPresent args: - --hot-reload @@ -226,10 +226,10 @@ kind: Pod metadata: name: "release-name-router-test-connection" labels: - helm.sh/chart: router-1.29.0-rc.1 + helm.sh/chart: router-1.29.0 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.0-rc.1" + app.kubernetes.io/version: "v1.29.0" app.kubernetes.io/managed-by: Helm annotations: "helm.sh/hook": test diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index d1088de86f..222941405c 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.29.0-rc.1 +version: 1.29.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "v1.29.0-rc.1" \ No newline at end of file +appVersion: "v1.29.0" \ No newline at end of file diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index 8803d4a176..399399a4ce 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.29.0-rc.1](https://img.shields.io/badge/Version-1.29.0--rc.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.29.0-rc.1](https://img.shields.io/badge/AppVersion-v1.29.0--rc.1-informational?style=flat-square) +![Version: 1.29.0](https://img.shields.io/badge/Version-1.29.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.29.0](https://img.shields.io/badge/AppVersion-v1.29.0-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.29.0-rc.1 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.29.0 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.29.0-rc.1 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.29.0-rc.1 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.29.0 --values my-values.yaml ``` _See [configuration](#configuration) below._ @@ -92,6 +92,3 @@ helm show values oci://ghcr.io/apollographql/helm-charts/router | terminationGracePeriodSeconds | int | `30` | Sets the [termination grace period](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution) for Deployment pods | | tolerations | list | `[]` | | | virtualservice.enabled | bool | `false` | | - ----------------------------------------------- -Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) 
diff --git a/licenses.html b/licenses.html index edc43d8893..7bd8e2bc23 100644 --- a/licenses.html +++ b/licenses.html @@ -44,10 +44,10 @@

    Third Party Licenses

    Overview of licenses:

    Copyright [2022] [Bryn Cooke]
     
    +Licensed under the Apache License, Version 2.0 (the "License");
    +you may not use this file except in compliance with the License.
    +You may obtain a copy of the License at
    +
    +    http://www.apache.org/licenses/LICENSE-2.0
    +
    +Unless required by applicable law or agreed to in writing, software
    +distributed under the License is distributed on an "AS IS" BASIS,
    +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    +See the License for the specific language governing permissions and
    +limitations under the License.
    +
    + +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    Licensed under the Apache License, Version 2.0
    +<LICENSE-APACHE or
    +http://www.apache.org/licenses/LICENSE-2.0> or the MIT
    +license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
    +at your option. All files in the project carrying such
    +notice may not be copied, modified, or distributed except
    +according to those terms.
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    MIT OR Apache-2.0
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    MIT or Apache-2.0
    +
    +
  • +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    The Apache License, Version 2.0 (Apache-2.0)
    +
    +Copyright 2015-2020 the fiat-crypto authors (see the AUTHORS file)
    +
     Licensed under the Apache License, Version 2.0 (the "License");
     you may not use this file except in compliance with the License.
     You may obtain a copy of the License at
    @@ -11377,6 +11499,151 @@ 

    Used by:

    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +
    +
  • +
  • +

    BSD 3-Clause "New" or "Revised" License

    +

    Used by:

    + +
    Copyright (c) 2016-2019 Isis Agora Lovecruft, Henry de Valence. All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +1. Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +
    +2. Redistributions in binary form must reproduce the above copyright
    +notice, this list of conditions and the following disclaimer in the
    +documentation and/or other materials provided with the distribution.
    +
    +3. Neither the name of the copyright holder nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
    +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
    +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
    +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
    +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
    +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
    +
    +========================================================================
    +
    +Portions of curve25519-dalek were originally derived from Adam Langley's
    +Go ed25519 implementation, found at <https://github.com/agl/ed25519/>,
    +under the following licence:
    +
    +========================================================================
    +
    +Copyright (c) 2012 The Go Authors. All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +   * Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +   * Redistributions in binary form must reproduce the above
    +copyright notice, this list of conditions and the following disclaimer
    +in the documentation and/or other materials provided with the
    +distribution.
    +   * Neither the name of Google Inc. nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
    +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
    +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
    +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
    +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
    +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
    +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
    +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    +
    +
  • +
  • +

    BSD 3-Clause "New" or "Revised" License

    +

    Used by:

    + +
    Copyright (c) 2016-2021 isis agora lovecruft. All rights reserved.
    +Copyright (c) 2016-2021 Henry de Valence. All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +1. Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +
    +2. Redistributions in binary form must reproduce the above copyright
    +notice, this list of conditions and the following disclaimer in the
    +documentation and/or other materials provided with the distribution.
    +
    +3. Neither the name of the copyright holder nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
    +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
    +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
    +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
    +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
    +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
    +
    +========================================================================
    +
    +Portions of curve25519-dalek were originally derived from Adam Langley's
    +Go ed25519 implementation, found at <https://github.com/agl/ed25519/>,
    +under the following licence:
    +
    +========================================================================
    +
    +Copyright (c) 2012 The Go Authors. All rights reserved.
    +
    +Redistribution and use in source and binary forms, with or without
    +modification, are permitted provided that the following conditions are
    +met:
    +
    +   * Redistributions of source code must retain the above copyright
    +notice, this list of conditions and the following disclaimer.
    +   * Redistributions in binary form must reproduce the above
    +copyright notice, this list of conditions and the following disclaimer
    +in the documentation and/or other materials provided with the
    +distribution.
    +   * Neither the name of Google Inc. nor the names of its
    +contributors may be used to endorse or promote products derived from
    +this software without specific prior written permission.
    +
    +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
    +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
    +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
    +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
    +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
    +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
    +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
    +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
    +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
    +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     
  • @@ -11456,8 +11723,6 @@

    BSD 3-Clause "New" or "Revised" License

    Used by:

    Copyright (c) <year> <owner>. 
     
    @@ -13240,6 +13505,61 @@ 

    Used by:

    OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +
    +
  • +
  • +

    MIT License

    +

    Used by:

    + +
    Copyright (c) 2023 Tokio Contributors
    +
    +Permission is hereby granted, free of charge, to any
    +person obtaining a copy of this software and associated
    +documentation files (the "Software"), to deal in the
    +Software without restriction, including without
    +limitation the rights to use, copy, modify, merge,
    +publish, distribute, sublicense, and/or sell copies of
    +the Software, and to permit persons to whom the Software
    +is furnished to do so, subject to the following
    +conditions:
    +
    +The above copyright notice and this permission notice
    +shall be included in all copies or substantial portions
    +of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
    +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
    +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
    +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
    +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
    +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
    +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    +DEALINGS IN THE SOFTWARE.
    +
    +The MIT License (MIT)
    +
    +Copyright (c) 2019 Yoshua Wuyts
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +SOFTWARE.
     
  • @@ -13827,7 +14147,6 @@

    Used by:

  • jsonschema
  • lazy-regex-proc_macros
  • serde_v8
  • -
  • tokio-macros
  • v8
  • valuable
  • void
  • @@ -14120,12 +14439,42 @@

    Used by:

    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +
  • +

    MIT License

    +

    Used by:

    + +
    The MIT License (MIT)
    +
    +Copyright (c) 2014 Mathijs van de Nes
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +SOFTWARE.
    +
  • MIT License

    Used by:

    The MIT License (MIT)
     
    @@ -14774,7 +15123,14 @@ 

    MIT License

    Used by:

    This project is dual-licensed under the Unlicense and MIT licenses.
     
    @@ -15622,50 +15978,26 @@ 

    Used by:

    UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE
     
    -See Terms of Use <https://www.unicode.org/copyright.html>
    -for definitions of Unicode Inc.’s Data Files and Software.
    +Unicode Data Files include all data files under the directories http://www.unicode.org/Public/, http://www.unicode.org/reports/, http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and http://www.unicode.org/utility/trac/browser/.
    +
    +Unicode Data Files do not include PDF online code charts under the directory http://www.unicode.org/Public/.
    +
    +Software includes any source code published in the Unicode Standard or under the directories http://www.unicode.org/Public/, http://www.unicode.org/reports/, http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and http://www.unicode.org/utility/trac/browser/.
     
    -NOTICE TO USER: Carefully read the following legal agreement.
    -BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
    -DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
    -YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
    -TERMS AND CONDITIONS OF THIS AGREEMENT.
    -IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
    -THE DATA FILES OR SOFTWARE.
    +NOTICE TO USER: Carefully read the following legal agreement. BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE THE DATA FILES OR SOFTWARE.
     
     COPYRIGHT AND PERMISSION NOTICE
     
    -Copyright © 1991-2022 Unicode, Inc. All rights reserved.
    -Distributed under the Terms of Use in https://www.unicode.org/copyright.html.
    +Copyright © 1991-2016 Unicode, Inc. All rights reserved. Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
     
    -Permission is hereby granted, free of charge, to any person obtaining
    -a copy of the Unicode data files and any associated documentation
    -(the "Data Files") or Unicode software and any associated documentation
    -(the "Software") to deal in the Data Files or Software
    -without restriction, including without limitation the rights to use,
    -copy, modify, merge, publish, distribute, and/or sell copies of
    -the Data Files or Software, and to permit persons to whom the Data Files
    -or Software are furnished to do so, provided that either
    -(a) this copyright and permission notice appear with all copies
    -of the Data Files or Software, or
    -(b) this copyright and permission notice appear in associated
    -Documentation.
    -
    -THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
    -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
    -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    -NONINFRINGEMENT OF THIRD PARTY RIGHTS.
    -IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
    -NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
    -DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
    -DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
    -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
    -PERFORMANCE OF THE DATA FILES OR SOFTWARE.
    -
    -Except as contained in this notice, the name of a copyright holder
    -shall not be used in advertising or otherwise to promote the sale,
    -use or other dealings in these Data Files or Software without prior
    -written authorization of the copyright holder.
    +Permission is hereby granted, free of charge, to any person obtaining a copy of the Unicode data files and any associated documentation (the "Data Files") or Unicode software and any associated documentation (the "Software") to deal in the Data Files or Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, and/or sell copies of the Data Files or Software, and to permit persons to whom the Data Files or Software are furnished to do so, provided that either
    +
    +     (a) this copyright and permission notice appear with all copies of the Data Files or Software, or
    +     (b) this copyright and permission notice appear in associated Documentation.
    +
    +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THE DATA FILES OR SOFTWARE.
    +
    +Except as contained in this notice, the name of a copyright holder shall not be used in advertising or otherwise to promote the sale, use or other dealings in these Data Files or Software without prior written authorization of the copyright holder.
     
  • diff --git a/scripts/install.sh b/scripts/install.sh index b9ba3c7cf6..6e3758cff5 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. -PACKAGE_VERSION="v1.29.0-rc.1" +PACKAGE_VERSION="v1.29.0" download_binary() { downloader --check From 4222fbe5fcc8886d4ce6e76fa204c4b80bfdd27f Mon Sep 17 00:00:00 2001 From: Maria Elisabeth Schreiber Date: Mon, 4 Sep 2023 08:30:51 -0600 Subject: [PATCH 32/81] docs: fix broken links (#3711) This PR removes a broken anchor link [reported by Zillow](https://apollograph.slack.com/archives/C0721M2F6/p1693434962231179) and removes some links about authorization directives. (The changes were originally intended to launch with the authorization directive docs. I will revert the changes in [that PR](https://github.com/apollographql/router/pull/3673).) --- .changesets/docs_fix_broken_links.md | 5 +++++ docs/source/configuration/cors.mdx | 2 +- docs/source/customizations/coprocessor.mdx | 4 ++-- 3 files changed, 8 insertions(+), 3 deletions(-) create mode 100644 .changesets/docs_fix_broken_links.md diff --git a/.changesets/docs_fix_broken_links.md b/.changesets/docs_fix_broken_links.md new file mode 100644 index 0000000000..d395aa0c07 --- /dev/null +++ b/.changesets/docs_fix_broken_links.md @@ -0,0 +1,5 @@ +### Fix broken links + +This documentation change fixes an incorrect anchor link in the [CORS documentation](https://www.apollographql.com/docs/router/configuration/cors/) and removes links to authorization docs which have not yet been released. + +By [@Meschreiber](https://github.com/Meschreiber) in https://github.com/apollographql/router/pull/3711 diff --git a/docs/source/configuration/cors.mdx b/docs/source/configuration/cors.mdx index 1eecc849e5..fb09ae9981 100644 --- a/docs/source/configuration/cors.mdx +++ b/docs/source/configuration/cors.mdx @@ -19,7 +19,7 @@ By default, the Apollo Router enables _only_ Apollo Studio to initiate browser c * Use this option if your supergraph is a public API with arbitrarily many web app consumers. * With this option enabled, the router sends the [wildcard (`*`)](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Allow-Origin#directives) value for the `Access-Control-Allow-Origin` header. This enables _any_ website to initiate browser connections to it (but they can't provide cookies or other credentials). -* You _must_ use the `origins` + `match_origins` option if clients need to [authenticate their requests with cookies](#passing-credentials-with-cors). +* You _must_ use the `origins` + `match_origins` option if clients need to [authenticate their requests with cookies](#passing-credentials). The following snippet includes an example of each option (use either `allow_any_origin`, or `origins + match_origins`): diff --git a/docs/source/customizations/coprocessor.mdx b/docs/source/customizations/coprocessor.mdx index 0a5040e765..5f02be67c3 100644 --- a/docs/source/customizations/coprocessor.mdx +++ b/docs/source/customizations/coprocessor.mdx @@ -901,7 +901,7 @@ Subsequent response chunks omit the `headers` and `statusCode` fields: ## Adding authorization claims via coprocessor -To use the [authorization directives](../configuration/authorization#authorization-directives), a request needs to include **claims**—the details of its authentication and scope. 
The most straightforward way to add claims is with [JWT authentication](../configuration/./authn-jwt). You can also add claims with a [`RouterService` coprocessor](#how-it-works) since it hooks into the request lifecycle directly after the router has received a client request. +For authorization purposes, your requests may need to include **claims**—the details of its authentication and scope. The most straightforward way to add claims is with [JWT authentication](../configuration/./authn-jwt). You can also add claims with a [`RouterService` coprocessor](#how-it-works) since it hooks into the request lifecycle directly after the router has received a client request. The router configuration needs to include at least these settings: @@ -930,7 +930,7 @@ This configuration prompts the router to send an HTTP POST request to your copro } ``` -When your coprocessor receives this request from the router, it should add claims to the request's [`context`](#context) and return them in the response to the router. Specifically, the coprocessor should add an entry with a claims object. The key must be `apollo_authentication::JWT::claims`, and the value should be the claims required by the authorization directives you intend to use. For example, if you want to use [`@requireScopes`](../configuration/authorization#requiresscopes), the response may look something like this: +When your coprocessor receives this request from the router, it should add claims to the request's [`context`](#context) and return them in the response to the router. Specifically, the coprocessor should add an entry with a claims object. The key must be `apollo_authentication::JWT::claims`, and the value should be the claims required for authorization. For example: ```json { From 97b09b33c07f3d681a1f145d0cc354afa401b10e Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Mon, 4 Sep 2023 17:25:38 +0200 Subject: [PATCH 33/81] Fix: Allow anonymous operation_name to be set in the context. This change allows the contexts OPERATION_NAME to be set to None. --- apollo-router/src/context/mod.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/apollo-router/src/context/mod.rs b/apollo-router/src/context/mod.rs index 07042e08a8..8313768848 100644 --- a/apollo-router/src/context/mod.rs +++ b/apollo-router/src/context/mod.rs @@ -74,7 +74,7 @@ impl Context { // This method should be removed once we have a proper way to get the operation name. self.entries .get(OPERATION_NAME) - .map(|v| v.value().as_str().unwrap().to_string()) + .and_then(|v| v.value().as_str().map(|s| s.to_string())) } /// Returns true if the context contains a value for the specified key. 
@@ -307,7 +307,7 @@ impl Default for BusyTimer { #[cfg(test)] mod test { - use crate::Context; + use crate::{context::OPERATION_NAME, Context}; #[test] fn test_context_insert() { @@ -370,4 +370,11 @@ mod test { assert_eq!(c.get("one").unwrap(), Some(2)); assert_eq!(c.get("two").unwrap(), Some(3)); } + + #[test] + fn operation_name_defaults_to_an_empty_string() { + let c = Context::new(); + c.insert(OPERATION_NAME, Option::::None).unwrap(); + assert!(c.operation_name().is_none()) + } } From 2a7e4b5a2dcc2b77d2a03c5fa58db44201aa2c0d Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Mon, 4 Sep 2023 17:39:07 +0200 Subject: [PATCH 34/81] lint --- apollo-router/src/context/mod.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/apollo-router/src/context/mod.rs b/apollo-router/src/context/mod.rs index 8313768848..8b3954aa0d 100644 --- a/apollo-router/src/context/mod.rs +++ b/apollo-router/src/context/mod.rs @@ -307,7 +307,8 @@ impl Default for BusyTimer { #[cfg(test)] mod test { - use crate::{context::OPERATION_NAME, Context}; + use crate::context::OPERATION_NAME; + use crate::Context; #[test] fn test_context_insert() { From 8659ca8f938e4bd9c943418f7982003651a0cde6 Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Mon, 4 Sep 2023 17:59:36 +0200 Subject: [PATCH 35/81] remove the function alltogether, along with its call --- apollo-router/src/context/mod.rs | 14 -------------- apollo-router/src/services/subgraph_service.rs | 8 +++++++- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/apollo-router/src/context/mod.rs b/apollo-router/src/context/mod.rs index 8b3954aa0d..bb45b46963 100644 --- a/apollo-router/src/context/mod.rs +++ b/apollo-router/src/context/mod.rs @@ -70,13 +70,6 @@ impl Context { } impl Context { - pub(crate) fn operation_name(&self) -> Option { - // This method should be removed once we have a proper way to get the operation name. - self.entries - .get(OPERATION_NAME) - .and_then(|v| v.value().as_str().map(|s| s.to_string())) - } - /// Returns true if the context contains a value for the specified key. 
pub fn contains_key(&self, key: K) -> bool where @@ -371,11 +364,4 @@ mod test { assert_eq!(c.get("one").unwrap(), Some(2)); assert_eq!(c.get("two").unwrap(), Some(3)); } - - #[test] - fn operation_name_defaults_to_an_empty_string() { - let c = Context::new(); - c.insert(OPERATION_NAME, Option::::None).unwrap(); - assert!(c.operation_name().is_none()) - } } diff --git a/apollo-router/src/services/subgraph_service.rs b/apollo-router/src/services/subgraph_service.rs index e137177a7c..c99e482786 100644 --- a/apollo-router/src/services/subgraph_service.rs +++ b/apollo-router/src/services/subgraph_service.rs @@ -431,6 +431,13 @@ async fn call_websocket( subgraph_cfg: &WebSocketConfiguration, subscription_hash: String, ) -> Result { + let operation_name = request + .subgraph_request + .body() + .operation_name + .clone() + .unwrap_or_default(); + let SubgraphRequest { subgraph_request, subscription_stream, @@ -445,7 +452,6 @@ async fn call_websocket( let (handle, created) = notify .create_or_subscribe(subscription_hash.clone(), false) .await?; - let operation_name = context.operation_name().unwrap_or_default(); tracing::info!( monotonic_counter.apollo.router.operations.subscriptions = 1u64, subscriptions.mode = %"passthrough", From 262d690b0d2d9657d159884570f346d2cc1b7fcb Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Mon, 4 Sep 2023 15:56:56 +0000 Subject: [PATCH 36/81] changeset --- .changesets/fix_dragonfly_ship_win_folder.md | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 .changesets/fix_dragonfly_ship_win_folder.md diff --git a/.changesets/fix_dragonfly_ship_win_folder.md b/.changesets/fix_dragonfly_ship_win_folder.md new file mode 100644 index 0000000000..892df67579 --- /dev/null +++ b/.changesets/fix_dragonfly_ship_win_folder.md @@ -0,0 +1,9 @@ +### Subscriptions: Correct v1.28.x regression allowing panic via un-named subscription operation + +Correct a regression that was introduced in Router v1.28.0 which made a Router **panic** possible when the following _three_ conditions are _all_ met: + +1. When sending an un-named (i.e., "anonymous") `subscription` operation (e.g., `subscription { ... }`); **and**; +2. The Router has a `subscription` type defined in the Supergraph schema; **and** +3. Have subscriptions enabled (they are disabled by default) in the Router's YAML configuration, either by setting `enabled: true` _or_ by setting a `mode` within the `subscriptions` object (as seen in [the subscriptions documentation](https://www.apollographql.com/docs/router/executing-operations/subscription-support/#router-setup). 
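To make condition 1 above concrete, an "anonymous" subscription is simply a subscription document sent without an operation name. A minimal illustrative example (the field name is hypothetical and not taken from this patch) looks like this:

```graphql
subscription {
  userWasCreated {
    name
  }
}
```

With the three conditions above met, a document of this shape could panic the router before this fix; an equivalent named operation (for example `subscription OnUserCreated { ... }`) did not trigger the regression.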
+ +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3738 From 541d6611aeca8e85bbeca9747564d41b5fe5cd45 Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Mon, 4 Sep 2023 18:01:45 +0200 Subject: [PATCH 37/81] remove unused import --- apollo-router/src/context/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/apollo-router/src/context/mod.rs b/apollo-router/src/context/mod.rs index bb45b46963..83bdb2bc80 100644 --- a/apollo-router/src/context/mod.rs +++ b/apollo-router/src/context/mod.rs @@ -300,7 +300,6 @@ impl Default for BusyTimer { #[cfg(test)] mod test { - use crate::context::OPERATION_NAME; use crate::Context; #[test] From 72667fbeab55bf4b97c3b6e2ddfc8398a5253939 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Mon, 4 Sep 2023 18:50:04 +0200 Subject: [PATCH 38/81] docs: graduate Authorization to preview from experimental (#3739) --- docs/source/config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/config.json b/docs/source/config.json index 28f2d33a7d..73d6f03a0c 100644 --- a/docs/source/config.json +++ b/docs/source/config.json @@ -46,7 +46,7 @@ "/configuration/authorization", [ "enterprise", - "experimental" + "preview" ] ], "Subgraph Authentication": "/configuration/authn-subgraph", From 56bf9103e96512ddb3d7bc65934c4d9aa630688e Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Mon, 4 Sep 2023 16:39:23 +0000 Subject: [PATCH 39/81] prep release: v1.29.1 --- .changesets/fix_dragonfly_ship_win_folder.md | 9 ------ CHANGELOG.md | 16 +++++++++++ Cargo.lock | 6 ++-- apollo-router-benchmarks/Cargo.toml | 2 +- apollo-router-scaffold/Cargo.toml | 2 +- .../templates/base/Cargo.toml | 2 +- .../templates/base/xtask/Cargo.toml | 2 +- apollo-router/Cargo.toml | 2 +- .../tracing/docker-compose.datadog.yml | 2 +- dockerfiles/tracing/docker-compose.jaeger.yml | 2 +- dockerfiles/tracing/docker-compose.zipkin.yml | 2 +- docs/source/containerization/docker.mdx | 2 +- docs/source/containerization/kubernetes.mdx | 28 +++++++++---------- helm/chart/router/Chart.yaml | 4 +-- helm/chart/router/README.md | 9 ++++-- licenses.html | 2 +- scripts/install.sh | 2 +- 17 files changed, 52 insertions(+), 42 deletions(-) delete mode 100644 .changesets/fix_dragonfly_ship_win_folder.md diff --git a/.changesets/fix_dragonfly_ship_win_folder.md b/.changesets/fix_dragonfly_ship_win_folder.md deleted file mode 100644 index 892df67579..0000000000 --- a/.changesets/fix_dragonfly_ship_win_folder.md +++ /dev/null @@ -1,9 +0,0 @@ -### Subscriptions: Correct v1.28.x regression allowing panic via un-named subscription operation - -Correct a regression that was introduced in Router v1.28.0 which made a Router **panic** possible when the following _three_ conditions are _all_ met: - -1. When sending an un-named (i.e., "anonymous") `subscription` operation (e.g., `subscription { ... }`); **and**; -2. The Router has a `subscription` type defined in the Supergraph schema; **and** -3. Have subscriptions enabled (they are disabled by default) in the Router's YAML configuration, either by setting `enabled: true` _or_ by setting a `mode` within the `subscriptions` object (as seen in [the subscriptions documentation](https://www.apollographql.com/docs/router/executing-operations/subscription-support/#router-setup). 
- -By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3738 diff --git a/CHANGELOG.md b/CHANGELOG.md index 24b7c15460..c81ea7fb0a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,22 @@ All notable changes to Router will be documented in this file. This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html). +# [1.29.1] - 2023-09-04 + +## 🐛 Fixes + +### Subscriptions: Correct v1.28.x regression allowing panic via un-named subscription operation + +Correct a regression that was introduced in Router v1.28.0 which made a Router **panic** possible when the following _three_ conditions are _all_ met: + +1. When sending an un-named (i.e., "anonymous") `subscription` operation (e.g., `subscription { ... }`); **and**; +2. The Router has a `subscription` type defined in the Supergraph schema; **and** +3. Have subscriptions enabled (they are disabled by default) in the Router's YAML configuration, either by setting `enabled: true` _or_ by setting a `mode` within the `subscriptions` object (as seen in [the subscriptions documentation](https://www.apollographql.com/docs/router/executing-operations/subscription-support/#router-setup). + +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3738 + + + # [1.29.0] - 2023-09-04 ## 🚀 Features diff --git a/Cargo.lock b/Cargo.lock index d02de46940..705e12df56 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -263,7 +263,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.29.0" +version = "1.29.1" dependencies = [ "access-json", "anyhow", @@ -408,7 +408,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.29.0" +version = "1.29.1" dependencies = [ "apollo-parser 0.6.1", "apollo-router", @@ -424,7 +424,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.29.0" +version = "1.29.1" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index f9cfc72f03..b0a6f0cf9e 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.29.0" +version = "1.29.1" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index 2c006fa9b9..9a6031ec2d 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.29.0" +version = "1.29.1" authors = ["Apollo Graph, Inc. 
"] edition = "2021" license = "Elastic-2.0" diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index f327476570..0181336ac6 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -22,7 +22,7 @@ apollo-router = { path ="{{integration_test}}apollo-router" } apollo-router = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} # Note if you update these dependencies then also update xtask/Cargo.toml -apollo-router = "1.29.0" +apollo-router = "1.29.1" {{/if}} {{/if}} async-trait = "0.1.52" diff --git a/apollo-router-scaffold/templates/base/xtask/Cargo.toml b/apollo-router-scaffold/templates/base/xtask/Cargo.toml index 167ef3c605..080ab782cb 100644 --- a/apollo-router-scaffold/templates/base/xtask/Cargo.toml +++ b/apollo-router-scaffold/templates/base/xtask/Cargo.toml @@ -13,7 +13,7 @@ apollo-router-scaffold = { path ="{{integration_test}}apollo-router-scaffold" } {{#if branch}} apollo-router-scaffold = { git="https://github.com/apollographql/router.git", branch="{{branch}}" } {{else}} -apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.29.0" } +apollo-router-scaffold = { git = "https://github.com/apollographql/router.git", tag = "v1.29.1" } {{/if}} {{/if}} anyhow = "1.0.58" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index de24279e81..c46cefb639 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.29.0" +version = "1.29.1" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://docs.rs/apollo-router" diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index 59c3ddf255..512b2ddceb 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.29.0 + image: ghcr.io/apollographql/router:v1.29.1 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 0de6214836..e2053883b7 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.29.0 + image: ghcr.io/apollographql/router:v1.29.1 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index 60f98be281..89539bc852 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.29.0 + image: ghcr.io/apollographql/router:v1.29.1 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/docs/source/containerization/docker.mdx b/docs/source/containerization/docker.mdx index b985e69b33..ebcb148767 100644 --- 
a/docs/source/containerization/docker.mdx +++ b/docs/source/containerization/docker.mdx @@ -11,7 +11,7 @@ The default behaviour of the router images is suitable for a quickstart or devel Note: The [docker documentation](https://docs.docker.com/engine/reference/run/) for the run command may be helpful when reading through the examples. -Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.29.0` +Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.29.1` ## Override the configuration diff --git a/docs/source/containerization/kubernetes.mdx b/docs/source/containerization/kubernetes.mdx index 35d620c49a..c61cdea299 100644 --- a/docs/source/containerization/kubernetes.mdx +++ b/docs/source/containerization/kubernetes.mdx @@ -13,7 +13,7 @@ import { Link } from 'gatsby'; [Helm](https://helm.sh) is the package manager for kubernetes. -There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.29.0/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes. +There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.29.1/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes. In both the following examples, we are using helm to install the router: - into namespace "router-deploy" (create namespace if it doesn't exist) @@ -64,10 +64,10 @@ kind: ServiceAccount metadata: name: release-name-router labels: - helm.sh/chart: router-1.29.0 + helm.sh/chart: router-1.29.1 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.0" + app.kubernetes.io/version: "v1.29.1" app.kubernetes.io/managed-by: Helm --- # Source: router/templates/secret.yaml @@ -76,10 +76,10 @@ kind: Secret metadata: name: "release-name-router" labels: - helm.sh/chart: router-1.29.0 + helm.sh/chart: router-1.29.1 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.0" + app.kubernetes.io/version: "v1.29.1" app.kubernetes.io/managed-by: Helm data: managedFederationApiKey: "UkVEQUNURUQ=" @@ -90,10 +90,10 @@ kind: ConfigMap metadata: name: release-name-router labels: - helm.sh/chart: router-1.29.0 + helm.sh/chart: router-1.29.1 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.0" + app.kubernetes.io/version: "v1.29.1" app.kubernetes.io/managed-by: Helm data: configuration.yaml: | @@ -117,10 +117,10 @@ kind: Service metadata: name: release-name-router labels: - helm.sh/chart: router-1.29.0 + helm.sh/chart: router-1.29.1 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.0" + app.kubernetes.io/version: "v1.29.1" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -143,10 +143,10 @@ kind: Deployment metadata: name: release-name-router labels: - helm.sh/chart: router-1.29.0 + helm.sh/chart: router-1.29.1 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.0" + app.kubernetes.io/version: "v1.29.1" app.kubernetes.io/managed-by: Helm annotations: @@ -174,7 +174,7 @@ spec: - name: router securityContext: {} - image: "ghcr.io/apollographql/router:v1.29.0" + image: 
"ghcr.io/apollographql/router:v1.29.1" imagePullPolicy: IfNotPresent args: - --hot-reload @@ -226,10 +226,10 @@ kind: Pod metadata: name: "release-name-router-test-connection" labels: - helm.sh/chart: router-1.29.0 + helm.sh/chart: router-1.29.1 app.kubernetes.io/name: router app.kubernetes.io/instance: release-name - app.kubernetes.io/version: "v1.29.0" + app.kubernetes.io/version: "v1.29.1" app.kubernetes.io/managed-by: Helm annotations: "helm.sh/hook": test diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index 222941405c..69bdb630a8 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -20,10 +20,10 @@ type: application # so it matches the shape of our release process and release automation. # By proxy of that decision, this version uses SemVer 2.0.0, though the prefix # of "v" is not included. -version: 1.29.0 +version: 1.29.1 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "v1.29.0" \ No newline at end of file +appVersion: "v1.29.1" \ No newline at end of file diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index 399399a4ce..983969d7a9 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -2,7 +2,7 @@ [router](https://github.com/apollographql/router) Rust Graph Routing runtime for Apollo Federation -![Version: 1.29.0](https://img.shields.io/badge/Version-1.29.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.29.0](https://img.shields.io/badge/AppVersion-v1.29.0-informational?style=flat-square) +![Version: 1.29.1](https://img.shields.io/badge/Version-1.29.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v1.29.1](https://img.shields.io/badge/AppVersion-v1.29.1-informational?style=flat-square) ## Prerequisites @@ -11,7 +11,7 @@ ## Get Repo Info ```console -helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.29.0 +helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.29.1 ``` ## Install Chart @@ -19,7 +19,7 @@ helm pull oci://ghcr.io/apollographql/helm-charts/router --version 1.29.0 **Important:** only helm3 is supported ```console -helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.29.0 --values my-values.yaml +helm upgrade --install [RELEASE_NAME] oci://ghcr.io/apollographql/helm-charts/router --version 1.29.1 --values my-values.yaml ``` _See [configuration](#configuration) below._ @@ -92,3 +92,6 @@ helm show values oci://ghcr.io/apollographql/helm-charts/router | terminationGracePeriodSeconds | int | `30` | Sets the [termination grace period](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution) for Deployment pods | | tolerations | list | `[]` | | | virtualservice.enabled | bool | `false` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/licenses.html b/licenses.html index 7bd8e2bc23..bbaf4cfa5a 100644 --- a/licenses.html +++ b/licenses.html @@ -7157,6 +7157,7 @@

    Used by:

  • bytes-utils
  • cc
  • cfg-if
  • +
  • ci_info
  • cmake
  • concurrent-queue
  • const-random
  • @@ -10723,7 +10724,6 @@

    Used by:

    Apache License 2.0

    Used by:

    diff --git a/scripts/install.sh b/scripts/install.sh index 6e3758cff5..47dc365d05 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. -PACKAGE_VERSION="v1.29.0" +PACKAGE_VERSION="v1.29.1" download_binary() { downloader --check From fd03dc4eaa569d197e9ded2b4e849ecba2c2a501 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Mon, 4 Sep 2023 16:45:40 +0000 Subject: [PATCH 40/81] CHANGELOG edits for v1.28.x and v1.29.0 warnings --- CHANGELOG.md | 64 +++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 61 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c81ea7fb0a..220c940a7f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,34 @@ This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2. # [1.29.1] - 2023-09-04 +## 🚀 Features + +### GraphOS Enterprise: Authorization ([PR #3397](https://github.com/apollographql/router/pull/3397), [PR #3662](https://github.com/apollographql/router/pull/3662)) + +> ⚠️ This is an [Enterprise feature](https://www.apollographql.com/blog/platform/evaluating-apollo-router-understanding-free-and-open-vs-commercial-features/) of the Apollo Router. It requires an organization with a [GraphOS Enterprise plan](https://www.apollographql.com/pricing/). + +If your organization doesn't currently have an Enterprise plan, you can test out this functionality by signing up for a free [Enterprise trial](https://www.apollographql.com/docs/graphos/org/plans/#enterprise-trials). +We introduce two new directives, `@requiresScopes` and `@authenticated`, that define authorization policies for fields and types in the supergraph schema, composed with Federation version 2.5.3 or higher. + +They are defined as follows: + +```graphql +scalar federation__Scope +directive @requiresScopes(scopes: [[federation__Scope!]!]!) on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM +``` + +This directive allows granular access control through user-defined scopes. + +```graphql +directive @authenticated on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENUM +``` + +This directive allows access to the annotated field or type for authenticated requests only. +For more information on how to use these directives, please read Apollo Router [docs](https://www.apollographql.com/docs/router/configuration/authorization) +``` + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3397 https://github.com/apollographql/router/pull/3662 + ## 🐛 Fixes ### Subscriptions: Correct v1.28.x regression allowing panic via un-named subscription operation @@ -19,9 +47,38 @@ Correct a regression that was introduced in Router v1.28.0 which made a Router * By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3738 +### Update Deno to resolve Rust Docs generation failure ([Issue #3305](https://github.com/apollographql/router/issues/3305)) + +We've updated to the latest version of Deno (0.200) to fix errors when generating [docs.rs/apollo-router](https://docs.rs/crate/apollo-router/latest). 
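Returning to the `@requiresScopes` and `@authenticated` directives defined near the top of this 1.29.1 entry, a subgraph schema might apply them as follows (a sketch with hypothetical field names and scopes, not taken from this patch; it only illustrates the argument shape `[[federation__Scope!]!]!`):

```graphql
type Query {
  me: User @authenticated
  bankAccounts: [Account!]! @requiresScopes(scopes: [["accounts:read"]])
}
```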
+ +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3626 + +### GraphQL response processing is now captured under the execution span ([PR #3732](https://github.com/apollographql/router/pull/3732)) + +Ensure processing is captured under the "execution" span. Previously, events would be reported under the supergraph span or — even more arbitrarily — any plugin's span (e.g., Rhai). + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3732 + +## 🛠 Maintenance + +### Apollo Uplink connections re-use the existing HTTP client ([Issue #3333](https://github.com/apollographql/router/issues/3333)) + +A single HTTP client will now be shared between requests of the same type when making requests to [Apollo Uplink](https://www.apollographql.com/docs/federation/managed-federation/uplink/) to fetch supergraphs, licenses and configuration from Studio. Previously, such requests created a new HTTP client on each periodic fetch which occasionally resulted in CPU spikes, especially on macOS. + +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/3703 + +### Remove unneeded schema parsing steps ([PR #3547](https://github.com/apollographql/router/pull/3547)) + +Access to a parsed schema is required in various parts of the Router. Previously were were parsing the schema multiple times, but this is now fixed. + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3547 # [1.29.0] - 2023-09-04 +> **Warning** +> +> **This version has a critical bug impacting anyone using subscriptions. See the _Fixes_ in [v1.29.1](https://github.com/apollographql/router/releases/tag/v1.29.1) for details. We highly recommend using v1.29.1 over v1.29.0 when using subscriptions.** + ## 🚀 Features ### GraphOS Enterprise: Authorization ([PR #3397](https://github.com/apollographql/router/pull/3397), [PR #3662](https://github.com/apollographql/router/pull/3662)) @@ -48,7 +105,6 @@ This directive allows access to the annotated field or type for authenticated re For more information on how to use these directives, please read Apollo Router [docs](https://www.apollographql.com/docs/router/configuration/authorization) ``` - By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3397 https://github.com/apollographql/router/pull/3662 ## 🐛 Fixes @@ -79,10 +135,12 @@ Access to a parsed schema is required in various parts of the Router. Previously By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3547 - - # [1.28.1] - 2023-08-28 +> **Warning** +> +> **This version has a critical bug impacting anyone using subscriptions. See the _Fixes_ in [v1.29.1](https://github.com/apollographql/router/releases/tag/v1.29.1) for details. We highly recommend using v1.29.1 over any v1.28.x version when using subscriptions.** + ## 🚀 Features ### Expose the `stats_reports_key` hash to plugins. ([Issue #2728](https://github.com/apollographql/router/issues/2728)) From 09957b11a9bf4e2e20d1a719e51445c18f61e5c2 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Mon, 4 Sep 2023 16:52:11 +0000 Subject: [PATCH 41/81] Remove invalid formatting of MD code-fence and trailing newlines. 
--- CHANGELOG.md | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 220c940a7f..f6b1d21712 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -103,7 +103,6 @@ directive @authenticated on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENU This directive allows access to the annotated field or type for authenticated requests only. For more information on how to use these directives, please read Apollo Router [docs](https://www.apollographql.com/docs/router/configuration/authorization) -``` By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3397 https://github.com/apollographql/router/pull/3662 @@ -267,7 +266,7 @@ Several improvements to safelisting behavior based on preview feedback: * If introspection is enabled on the server, any operation whose top-level fields are introspection fields (`__type`, `__schema`, or `__typename`) is considered to be in the safelist. This special case is not applied if `require_id` is enabled, so that Router never parses freeform GraphQL in this mode. * When `log_unknown` is enabled and `apq` has not been disabled, the Router now logs any operation not in the safelist as unknown, even those sent via IDs if the operation was found in the APQ cache rather than the manifest. -* When `log_unknown` and `require_id` are both enabled, the Router now logs all operations that rejects (i.e., all operations sent as freeform GraphQL). +* When `log_unknown` and `require_id` are both enabled, the Router now logs all operations that rejects (i.e., all operations sent as freeform GraphQL). Previously, the Router only logged the operations that would have been rejected by the safelist feature with `require_id` disabled (i.e., operations sent as freeform GraphQL that do not match an operation in the manifest). @@ -681,7 +680,7 @@ By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollo # [1.24.0] - 2023-07-13 -***Note that this release contains a bug in query planning around query fragment reuse and should not be used. If upgrading, consider going straight to 1.25.0.*** +***Note that this release contains a bug in query planning around query fragment reuse and should not be used. If upgrading, consider going straight to 1.25.0.*** ## 🚀 Features @@ -705,7 +704,7 @@ By [@garypen](https://github.com/garypen) in https://github.com/apollographql/ro The GraphQL spec is rather light on what should happen when we process responses from subgraphs. The current behaviour within the Router was inconsistently short circuiting response processing and this producing confusing errors. > #### Processing the response -> +> > If the response uses a non-200 status code and the media type of the response payload is application/json then the client MUST NOT rely on the body to be a well-formed GraphQL response since the source of the response may not be the server but instead some intermediary such as API gateways, proxies, firewalls, etc. The logic has been simplified and made consistent using the following rules: @@ -919,7 +918,7 @@ By [@garypen](https://github.com/garypen) in https://github.com/apollographql/ro Root span name has changed from `request` to ` ` -[Open Telemetry graphql semantic conventions](https://opentelemetry.io/docs/specs/otel/trace/semantic_conventions/instrumentation/graphql/) specify that the root span name must match the operation kind and name. 
+[Open Telemetry graphql semantic conventions](https://opentelemetry.io/docs/specs/otel/trace/semantic_conventions/instrumentation/graphql/) specify that the root span name must match the operation kind and name. Many tracing providers don't have good support for filtering traces via attribute, so changing this significantly enhances the tracing experience. @@ -1018,7 +1017,7 @@ By [@EverlastingBugstopper](https://github.com/EverlastingBugstopper) in https:/ ### Add security-related warnings to JWT auth docs ([PR #3299](https://github.com/apollographql/router/pull/3299)) -There are a couple potential security pitfalls when leveraging the router for JWT authentication. These are now documented in [the relevant section of the docs](https://www.apollographql.com/docs/router/configuration/authn-jwt). If you are currently using JWT authentication in the router, be sure to [secure your subgraphs](https://www.apollographql.com/docs/federation/building-supergraphs/subgraphs-overview#securing-your-subgraphs) and [use care when propagating headers](https://www.apollographql.com/docs/router/configuration/authn-jwt#example-forwarding-claims-to-subgraphs). +There are a couple potential security pitfalls when leveraging the router for JWT authentication. These are now documented in [the relevant section of the docs](https://www.apollographql.com/docs/router/configuration/authn-jwt). If you are currently using JWT authentication in the router, be sure to [secure your subgraphs](https://www.apollographql.com/docs/federation/building-supergraphs/subgraphs-overview#securing-your-subgraphs) and [use care when propagating headers](https://www.apollographql.com/docs/router/configuration/authn-jwt#example-forwarding-claims-to-subgraphs). By [@dbanty](https://github.com/dbanty) in https://github.com/apollographql/router/pull/3299 @@ -1262,7 +1261,7 @@ This PR includes the following configurable performance optimizations. - Instead of sending subscription data between a Router and subgraph over an open WebSocket, the Router can be configured to send the subgraph a callback URL that will then be used to receive all source stream events - Subscription enabled subgraphs send source stream events (subscription updates) back to the callback URL by making HTTP POST requests - Refer to the [callback mode documentation](https://github.com/apollographql/router/blob/dev/dev-docs/callback_protocol.md) for more details, including an explanation of the callback URL request/response payload format -- This feature is still experimental and needs to be enabled explicitly in the Router config file +- This feature is still experimental and needs to be enabled explicitly in the Router config file By [@bnjjj](https://github.com/bnjjj) and [@o0Ignition0o](https://github.com/o0ignition0o) in https://github.com/apollographql/router/pull/3285 @@ -1296,9 +1295,9 @@ By [@SimonSapin](https://github.com/SimonSapin) in https://github.com/apollograp ### Add support for empty auth prefixes ([Issue #2909](https://github.com/apollographql/router/issues/2909)) -The `authentication.jwt` plugin now supports empty prefixes for the JWT header. Some companies use prefix-less headers; previously, the authentication plugin rejected requests even with an empty header explicitly set, such as: +The `authentication.jwt` plugin now supports empty prefixes for the JWT header. 
Some companies use prefix-less headers; previously, the authentication plugin rejected requests even with an empty header explicitly set, such as: -```yml +```yml authentication: jwt: header_value_prefix: "" From a6098f8086f0e1805e4009465cd4bb0fd9c89b99 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Mon, 4 Sep 2023 20:28:35 +0300 Subject: [PATCH 42/81] Fix changelog formatting error on `main` (#3744) Same as https://github.com/apollographql/router/commit/b17018be5c0552b9de57f767189f9a02dd2a7799 but for the other place, before I copied and pasted the changelog that I didn't fix. --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f6b1d21712..26f26fb183 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,7 +30,6 @@ directive @authenticated on OBJECT | FIELD_DEFINITION | INTERFACE | SCALAR | ENU This directive allows access to the annotated field or type for authenticated requests only. For more information on how to use these directives, please read Apollo Router [docs](https://www.apollographql.com/docs/router/configuration/authorization) -``` By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3397 https://github.com/apollographql/router/pull/3662 From e7c17ab6a860a94150e0b77f00ba602432aa8d19 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 12:41:07 +0300 Subject: [PATCH 43/81] chore(deps): update actions/checkout action to v4 (#3733) --- .github/workflows/update_uplink_schema.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/update_uplink_schema.yml b/.github/workflows/update_uplink_schema.yml index 20a7560c0b..706c8f0457 100644 --- a/.github/workflows/update_uplink_schema.yml +++ b/.github/workflows/update_uplink_schema.yml @@ -9,7 +9,7 @@ jobs: Update-Uplink-Schema: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4 - name: Install Rover run: | curl -sSL https://rover.apollo.dev/nix/v0.14.1 | sh From 9a0cd4e81b5093168b952dc2b2cf4cccad178ade Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 5 Sep 2023 12:29:00 +0200 Subject: [PATCH 44/81] metadata cleanup (#3746) * remove unused patch entries in Cargo.toml (referring to a 1 year old commit in opentelemetry) * remove exemptions for the chrono security advisories (they are fixed now) --- .changesets/maint_geal_metadata_cleanup.md | 6 ++++++ Cargo.toml | 7 ------- deny.toml | 6 +----- 3 files changed, 7 insertions(+), 12 deletions(-) create mode 100644 .changesets/maint_geal_metadata_cleanup.md diff --git a/.changesets/maint_geal_metadata_cleanup.md b/.changesets/maint_geal_metadata_cleanup.md new file mode 100644 index 0000000000..ee9628f870 --- /dev/null +++ b/.changesets/maint_geal_metadata_cleanup.md @@ -0,0 +1,6 @@ +### Metadata cleanup ([PR #3746](https://github.com/apollographql/router/pull/3746)) + +* remove unused patch entries in Cargo.toml +* remove exemptions for the chrono security advisories (they are fixed now) + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3746 \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index 3df068499c..9c44480765 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,10 +41,3 @@ incremental = false inherits = "release" debug = 1 -[patch.crates-io] -# TODO: to delete -# opentelemetry = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", rev = 
"e5ef3552efab2bdbf2f838023c37461cd799ab2c"} -# opentelemetry-http = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", rev = "e5ef3552efab2bdbf2f838023c37461cd799ab2c"} -# opentelemetry-jaeger = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", rev = "e5ef3552efab2bdbf2f838023c37461cd799ab2c"} -# opentelemetry-zipkin = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", rev = "e5ef3552efab2bdbf2f838023c37461cd799ab2c"} -# opentelemetry-datadog = { git = "https://github.com/open-telemetry/opentelemetry-rust.git", rev = "e5ef3552efab2bdbf2f838023c37461cd799ab2c"} diff --git a/deny.toml b/deny.toml index 138c3a6bc2..084bb9a883 100644 --- a/deny.toml +++ b/deny.toml @@ -26,13 +26,9 @@ git-fetch-with-cli = true # A list of advisory IDs to ignore. Note that ignored advisories will still # output a note when they are encountered. -# while https://github.com/chronotope/chrono/issues/499 is open. -# We need to keep track of this issue, and make sure `tracing-subscriber` is updated -# We will then be able to remove this -# # RUSTSEC-2023-0052 and RUSTSEC-2023-0053 are pending a webpki update that is tracked by https://github.com/apollographql/router/issues/3645 # and will be fixed by https://github.com/apollographql/router/pull/3643 -ignore = ["RUSTSEC-2020-0159", "RUSTSEC-2020-0071", "RUSTSEC-2023-0053", "RUSTSEC-2023-0052"] +ignore = ["RUSTSEC-2023-0053", "RUSTSEC-2023-0052"] # This section is considered when running `cargo deny check licenses` # More documentation for the licenses section can be found here: From ea277cab66c6a10d46a6d35cace6fb275639e60b Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Tue, 5 Sep 2023 11:34:50 +0100 Subject: [PATCH 45/81] provide a rhai interface to the router service (#3234) Adds `Rhai` support for the `router_service`. It is now possible to interact with requests and responses at the `router_service` level from `Rhai`. The functionality is very similar to that provided for interacting with existing services, for example `supergraph_service`. For instance, you may map requests and responses as follows: ``` fn router_service(service) { const request_callback = Fn("process_request"); service.map_request(request_callback); const response_callback = Fn("process_response"); service.map_response(response_callback); } ``` The main difference from existing services is that the router_service is dealing with HTTP Bodies, not well formatted GraphQL objects. This means that the `Request.body` or `Response.body` is not a well structured object that you may interact with, but is simply a String. This makes it more complex to deal with Request and Response bodies with the tradeoff being that a script author has more power and can perform tasks which are just not possible within the confines of a well-formed GraphQL object. 
This simple example, simply logs the bodies: ``` // Generate a log for each request at this stage fn process_request(request) { print(`body: ${request.body}`); } // Generate a log for each response at this stage fn process_response(response) { print(`body: ${response.body}`); } ``` Fixes #2278 --------- Co-authored-by: Geoffroy Couprie Co-authored-by: Bryn Cooke --- .../feat_garypen_2278_rhai_router_service.md | 43 ++ apollo-router/src/plugins/rhai/engine.rs | 384 ++++++++++++++- apollo-router/src/plugins/rhai/execution.rs | 62 ++- apollo-router/src/plugins/rhai/mod.rs | 450 ++++++++++-------- apollo-router/src/plugins/rhai/router.rs | 70 +++ apollo-router/src/plugins/rhai/subgraph.rs | 59 +++ apollo-router/src/plugins/rhai/supergraph.rs | 62 ++- apollo-router/src/plugins/rhai/tests.rs | 8 +- .../tests/fixtures/test_callbacks.rhai | 14 +- apollo-router/tests/rhai_tests.rs | 5 +- docs/source/customizations/rhai-api.mdx | 29 +- docs/source/customizations/rhai.mdx | 11 +- 12 files changed, 955 insertions(+), 242 deletions(-) create mode 100644 .changesets/feat_garypen_2278_rhai_router_service.md create mode 100644 apollo-router/src/plugins/rhai/router.rs diff --git a/.changesets/feat_garypen_2278_rhai_router_service.md b/.changesets/feat_garypen_2278_rhai_router_service.md new file mode 100644 index 0000000000..c105892e4a --- /dev/null +++ b/.changesets/feat_garypen_2278_rhai_router_service.md @@ -0,0 +1,43 @@ +### Provide a rhai interface to the router service ([Issue #2278](https://github.com/apollographql/router/issues/2278)) + +Adds `Rhai` support for the `router_service`. + +It is now possible to interact with requests and responses at the `router_service` level from `Rhai`. The functionality is very similar to that provided for interacting with existing services, for example `supergraph_service`. For instance, you may map requests and responses as follows: + +```rust +fn router_service(service) { + const request_callback = Fn("process_request"); + service.map_request(request_callback); + const response_callback = Fn("process_response"); + service.map_response(response_callback); +} + +``` +The main difference from existing services is that the router_service is dealing with HTTP Bodies, not well formatted GraphQL objects. This means that the `Request.body` or `Response.body` is not a well structured object that you may interact with, but is simply a String. + +This makes it more complex to deal with Request and Response bodies with the tradeoff being that a script author has more power and can perform tasks which are just not possible within the confines of a well-formed GraphQL object. + +This simple example, simply logs the bodies: + +```rust +// Generate a log for each request at this stage +fn process_request(request) { + print(`body: ${request.body}`); +} + +// Generate a log for each response at this stage +fn process_response(response) { + print(`body: ${response.body}`); +} +``` + +This PR also introduces two new Rhai functions: + +```rust +json_encode(Object) +json_decode(String) -> Object + +``` +Which will respectively encode a `Rhai` Object or decode a JSON string into a `Rhai` Object. These functions may be helpful when dealing with String bodies which represent encoded JSON objects. 
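+
+As a rough sketch, and assuming the response body holds a complete JSON document (and that the router-level `body` accessor described above is available), the two helpers might be combined in a response callback like this:
+
+```rust
+fn process_response(response) {
+    // At the router_service level the body is a String; decode it into a Rhai object.
+    // This assumes the body is valid JSON.
+    let parsed = json_decode(response.body);
+    // Inspect or adjust the decoded object here, then serialize it back into the String body.
+    response.body = json_encode(parsed);
+}
+```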
+ +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/3234 diff --git a/apollo-router/src/plugins/rhai/engine.rs b/apollo-router/src/plugins/rhai/engine.rs index 135dda4556..ab1dd67194 100644 --- a/apollo-router/src/plugins/rhai/engine.rs +++ b/apollo-router/src/plugins/rhai/engine.rs @@ -6,6 +6,7 @@ use std::time::SystemTime; use base64::prelude::BASE64_STANDARD; use base64::Engine as _; +use bytes::Bytes; use http::header::InvalidHeaderName; use http::uri::Authority; use http::uri::Parts; @@ -30,6 +31,7 @@ use tower::BoxError; use uuid::Uuid; use super::execution; +use super::router; use super::subgraph; use super::supergraph; use super::Rhai; @@ -293,6 +295,16 @@ mod router_json { pub(crate) fn value_to_string(x: &mut Value) -> String { format!("{x:?}") } + + #[rhai_fn(pure, return_raw)] + pub(crate) fn json_encode(input: &mut Dynamic) -> Result> { + serde_json::to_string(input).map_err(|e| e.to_string().into()) + } + + #[rhai_fn(pure, return_raw)] + pub(crate) fn json_decode(input: &mut ImmutableString) -> Result> { + serde_json::from_str(input).map_err(|e| e.to_string().into()) + } } #[export_module] @@ -350,7 +362,67 @@ mod router_context { format!("{x:?}") } + #[rhai_fn(get = "context", pure, return_raw)] + pub(crate) fn router_first_response_context_get( + obj: &mut SharedMut, + ) -> Result> { + Ok(obj.with_mut(|response| response.context.clone())) + } + #[rhai_fn(set = "context", return_raw)] + pub(crate) fn router_first_response_context_set( + obj: &mut SharedMut, + context: Context, + ) -> Result<(), Box> { + obj.with_mut(|response| response.context = context); + Ok(()) + } + + #[rhai_fn(get = "context", pure, return_raw)] + pub(crate) fn supergraph_first_response_context_get( + obj: &mut SharedMut, + ) -> Result> { + Ok(obj.with_mut(|response| response.context.clone())) + } + #[rhai_fn(set = "context", return_raw)] + pub(crate) fn supergraph_first_response_context_set( + obj: &mut SharedMut, + context: Context, + ) -> Result<(), Box> { + obj.with_mut(|response| response.context = context); + Ok(()) + } + + #[rhai_fn(get = "context", pure, return_raw)] + pub(crate) fn execution_first_response_context_get( + obj: &mut SharedMut, + ) -> Result> { + Ok(obj.with_mut(|response| response.context.clone())) + } + #[rhai_fn(set = "context", return_raw)] + pub(crate) fn execution_first_response_context_set( + obj: &mut SharedMut, + context: Context, + ) -> Result<(), Box> { + obj.with_mut(|response| response.context = context); + Ok(()) + } + // Add context getter/setters for deferred responses + #[rhai_fn(get = "context", pure, return_raw)] + pub(crate) fn router_deferred_response_context_get( + obj: &mut SharedMut, + ) -> Result> { + Ok(obj.with_mut(|response| response.context.clone())) + } + #[rhai_fn(set = "context", return_raw)] + pub(crate) fn router_deferred_response_context_set( + obj: &mut SharedMut, + context: Context, + ) -> Result<(), Box> { + obj.with_mut(|response| response.context = context); + Ok(()) + } + #[rhai_fn(get = "context", pure, return_raw)] pub(crate) fn supergraph_deferred_response_context_get( obj: &mut SharedMut, @@ -468,22 +540,48 @@ mod router_plugin { } // End of SubgraphRequest specific section + #[rhai_fn(get = "headers", pure, return_raw)] + pub(crate) fn get_originating_headers_router_response( + obj: &mut SharedMut, + ) -> Result> { + Ok(obj.with_mut(|response| response.response.headers().clone())) + } + + #[rhai_fn(name = "is_primary", pure)] + pub(crate) fn router_response_is_primary(_obj: &mut SharedMut) -> 
bool { + true + } + + #[rhai_fn(get = "headers", pure, return_raw)] + pub(crate) fn get_originating_headers_router_deferred_response( + _obj: &mut SharedMut, + ) -> Result> { + Err(CANNOT_ACCESS_HEADERS_ON_A_DEFERRED_RESPONSE.into()) + } + + #[rhai_fn(name = "is_primary", pure)] + pub(crate) fn router_deferred_response_is_primary( + _obj: &mut SharedMut, + ) -> bool { + false + } + #[rhai_fn(get = "headers", pure, return_raw)] pub(crate) fn get_originating_headers_supergraph_response( - obj: &mut SharedMut, + obj: &mut SharedMut, ) -> Result> { Ok(obj.with_mut(|response| response.response.headers().clone())) } #[rhai_fn(name = "is_primary", pure)] pub(crate) fn supergraph_response_is_primary( - _obj: &mut SharedMut, + _obj: &mut SharedMut, ) -> bool { true } #[rhai_fn(get = "headers", pure, return_raw)] - pub(crate) fn get_originating_headers_router_deferred_response( + pub(crate) fn get_originating_headers_supergraph_deferred_response( _obj: &mut SharedMut, ) -> Result> { Err(CANNOT_ACCESS_HEADERS_ON_A_DEFERRED_RESPONSE.into()) @@ -498,13 +596,15 @@ mod router_plugin { #[rhai_fn(get = "headers", pure, return_raw)] pub(crate) fn get_originating_headers_execution_response( - obj: &mut SharedMut, + obj: &mut SharedMut, ) -> Result> { Ok(obj.with_mut(|response| response.response.headers().clone())) } #[rhai_fn(name = "is_primary", pure)] - pub(crate) fn execution_response_is_primary(_obj: &mut SharedMut) -> bool { + pub(crate) fn execution_response_is_primary( + _obj: &mut SharedMut, + ) -> bool { true } @@ -529,16 +629,24 @@ mod router_plugin { Ok(obj.with_mut(|response| response.response.headers().clone())) } + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided + #[rhai_fn(get = "body", pure, return_raw)] + pub(crate) fn get_originating_body_router_response( + obj: &mut SharedMut, + ) -> Result, Box> { + Ok(obj.with_mut(|response| response.response.body().to_vec())) + }*/ + #[rhai_fn(get = "body", pure, return_raw)] pub(crate) fn get_originating_body_supergraph_response( - obj: &mut SharedMut, + obj: &mut SharedMut, ) -> Result> { Ok(obj.with_mut(|response| response.response.body().clone())) } #[rhai_fn(get = "body", pure, return_raw)] pub(crate) fn get_originating_body_execution_response( - obj: &mut SharedMut, + obj: &mut SharedMut, ) -> Result> { Ok(obj.with_mut(|response| response.response.body().clone())) } @@ -550,8 +658,24 @@ mod router_plugin { Ok(obj.with_mut(|response| response.response.body().clone())) } + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided #[rhai_fn(get = "body", pure, return_raw)] pub(crate) fn get_originating_body_router_deferred_response( + obj: &mut SharedMut, + ) -> Result> { + // Get the body + let bytes = obj.with_mut(|response| { + let bytes = std::mem::take(&mut response.response); + // Copy back the response so it can continue to be used + response.response = bytes.clone(); + Ok::>(bytes) + })?; + + String::from_utf8(bytes.to_vec()).map_err(|err| err.to_string().into()) + }*/ + + #[rhai_fn(get = "body", pure, return_raw)] + pub(crate) fn get_originating_body_supergraph_deferred_response( obj: &mut SharedMut, ) -> Result> { Ok(obj.with_mut(|response| response.response.clone())) @@ -565,8 +689,8 @@ mod router_plugin { } #[rhai_fn(set = "headers", return_raw)] - pub(crate) fn set_originating_headers_supergraph_response( - obj: &mut SharedMut, + pub(crate) fn set_originating_headers_router_response( + obj: &mut SharedMut, headers: HeaderMap, ) -> Result<(), Box> { obj.with_mut(|response| 
*response.response.headers_mut() = headers); @@ -575,6 +699,23 @@ mod router_plugin { #[rhai_fn(set = "headers", return_raw)] pub(crate) fn set_originating_headers_router_deferred_response( + _obj: &mut SharedMut, + _headers: HeaderMap, + ) -> Result<(), Box> { + Err(CANNOT_ACCESS_HEADERS_ON_A_DEFERRED_RESPONSE.into()) + } + + #[rhai_fn(set = "headers", return_raw)] + pub(crate) fn set_originating_headers_supergraph_response( + obj: &mut SharedMut, + headers: HeaderMap, + ) -> Result<(), Box> { + obj.with_mut(|response| *response.response.headers_mut() = headers); + Ok(()) + } + + #[rhai_fn(set = "headers", return_raw)] + pub(crate) fn set_originating_headers_supergraph_deferred_response( _obj: &mut SharedMut, _headers: HeaderMap, ) -> Result<(), Box> { @@ -583,7 +724,7 @@ mod router_plugin { #[rhai_fn(set = "headers", return_raw)] pub(crate) fn set_originating_headers_execution_response( - obj: &mut SharedMut, + obj: &mut SharedMut, headers: HeaderMap, ) -> Result<(), Box> { obj.with_mut(|response| *response.response.headers_mut() = headers); @@ -607,9 +748,20 @@ mod router_plugin { Ok(()) } + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided + #[rhai_fn(set = "body", return_raw)] + pub(crate) fn set_originating_body_router_response( + obj: &mut SharedMut, + body: String, + ) -> Result<(), Box> { + let bytes = Bytes::from(body); + obj.with_mut(|response| *response.response.body_mut() = bytes); + Ok(()) + }*/ + #[rhai_fn(set = "body", return_raw)] pub(crate) fn set_originating_body_supergraph_response( - obj: &mut SharedMut, + obj: &mut SharedMut, body: Response, ) -> Result<(), Box> { obj.with_mut(|response| *response.response.body_mut() = body); @@ -618,7 +770,7 @@ mod router_plugin { #[rhai_fn(set = "body", return_raw)] pub(crate) fn set_originating_body_execution_response( - obj: &mut SharedMut, + obj: &mut SharedMut, body: Response, ) -> Result<(), Box> { obj.with_mut(|response| *response.response.body_mut() = body); @@ -634,8 +786,19 @@ mod router_plugin { Ok(()) } + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided #[rhai_fn(set = "body", return_raw)] pub(crate) fn set_originating_body_router_deferred_response( + obj: &mut SharedMut, + body: String, + ) -> Result<(), Box> { + let bytes = Bytes::from(body); + obj.with_mut(|response| response.response = bytes); + Ok(()) + }*/ + + #[rhai_fn(set = "body", return_raw)] + pub(crate) fn set_originating_body_supergraph_deferred_response( obj: &mut SharedMut, body: Response, ) -> Result<(), Box> { @@ -678,7 +841,18 @@ mod router_plugin { } #[rhai_fn(name = "headers_are_available", pure)] - pub(crate) fn supergraph_response(_: &mut SharedMut) -> bool { + pub(crate) fn router_response(_: &mut SharedMut) -> bool { + true + } + + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided + #[rhai_fn(name = "headers_are_available", pure)] + pub(crate) fn router_deferred_response(_: &mut SharedMut) -> bool { + false + }*/ + + #[rhai_fn(name = "headers_are_available", pure)] + pub(crate) fn supergraph_response(_: &mut SharedMut) -> bool { true } @@ -690,7 +864,7 @@ mod router_plugin { } #[rhai_fn(name = "headers_are_available", pure)] - pub(crate) fn execution_response(_: &mut SharedMut) -> bool { + pub(crate) fn execution_response(_: &mut SharedMut) -> bool { true } @@ -934,15 +1108,29 @@ mod router_plugin { } #[derive(Default)] -pub(crate) struct RhaiExecutionResponse { +pub(crate) struct RhaiRouterFirstRequest { pub(crate) context: Context, - 
pub(crate) response: http_ext::Response, + pub(crate) request: http::Request<()>, } +#[allow(dead_code)] #[derive(Default)] -pub(crate) struct RhaiExecutionDeferredResponse { +pub(crate) struct RhaiRouterChunkedRequest { pub(crate) context: Context, - pub(crate) response: Response, + pub(crate) request: Bytes, +} + +#[derive(Default)] +pub(crate) struct RhaiRouterResponse { + pub(crate) context: Context, + pub(crate) response: http::Response<()>, +} + +#[allow(dead_code)] +#[derive(Default)] +pub(crate) struct RhaiRouterChunkedResponse { + pub(crate) context: Context, + pub(crate) response: Bytes, } #[derive(Default)] @@ -957,6 +1145,18 @@ pub(crate) struct RhaiSupergraphDeferredResponse { pub(crate) response: Response, } +#[derive(Default)] +pub(crate) struct RhaiExecutionResponse { + pub(crate) context: Context, + pub(crate) response: http_ext::Response, +} + +#[derive(Default)] +pub(crate) struct RhaiExecutionDeferredResponse { + pub(crate) context: Context, + pub(crate) response: Response, +} + macro_rules! if_subgraph { ( subgraph => $subgraph: block else $not_subgraph: block ) => { $subgraph @@ -966,6 +1166,152 @@ macro_rules! if_subgraph { }; } +macro_rules! register_rhai_router_interface { + ($engine: ident, $($base: ident), *) => { + $( + // Context stuff + $engine.register_get( + "context", + |obj: &mut SharedMut<$base::FirstRequest>| -> Result> { + Ok(obj.with_mut(|request| request.context.clone())) + } + ) + .register_get( + "context", + |obj: &mut SharedMut<$base::ChunkedRequest>| -> Result> { + Ok(obj.with_mut(|request| request.context.clone())) + } + ).register_get( + "context", + |obj: &mut SharedMut<$base::Response>| -> Result> { + Ok(obj.with_mut(|response| response.context.clone())) + } + ) + .register_get( + "context", + |obj: &mut SharedMut<$base::DeferredResponse>| -> Result> { + Ok(obj.with_mut(|response| response.context.clone())) + } + ); + + $engine.register_set( + "context", + |obj: &mut SharedMut<$base::FirstRequest>, context: Context| { + obj.with_mut(|request| request.context = context); + Ok(()) + } + ) + .register_set( + "context", + |obj: &mut SharedMut<$base::ChunkedRequest>, context: Context| { + obj.with_mut(|request| request.context = context); + Ok(()) + } + ) + .register_set( + "context", + |obj: &mut SharedMut<$base::Response>, context: Context| { + obj.with_mut(|response| response.context = context); + Ok(()) + } + ).register_set( + "context", + |obj: &mut SharedMut<$base::DeferredResponse>, context: Context| { + obj.with_mut(|response| response.context = context); + Ok(()) + } + ); + + // Originating Request + $engine.register_get( + "headers", + |obj: &mut SharedMut<$base::FirstRequest>| -> Result> { + Ok(obj.with_mut(|request| request.request.headers().clone())) + } + ).register_get( + "headers", + |obj: &mut SharedMut<$base::Response>| -> Result> { + Ok(obj.with_mut(|response| response.response.headers().clone())) + } + ); + + $engine.register_set( + "headers", + |obj: &mut SharedMut<$base::FirstRequest>, headers: HeaderMap| { + if_subgraph! { + $base => { + let _unused = (obj, headers); + Err("cannot mutate originating request on a subgraph".into()) + } else { + obj.with_mut(|request| *request.request.headers_mut() = headers); + Ok(()) + } + } + } + ).register_set( + "headers", + |obj: &mut SharedMut<$base::Response>, headers: HeaderMap| { + if_subgraph! 
{ + $base => { + let _unused = (obj, headers); + Err("cannot mutate originating request on a subgraph".into()) + } else { + obj.with_mut(|response| *response.response.headers_mut() = headers); + Ok(()) + } + } + } + ); + + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided + $engine.register_get( + "body", + |obj: &mut SharedMut<$base::ChunkedRequest>| -> Result, Box> { + Ok( obj.with_mut(|request| { request.request.to_vec()})) + } + ); + + $engine.register_set( + "body", + |obj: &mut SharedMut<$base::ChunkedRequest>, body: Vec| { + if_subgraph! { + $base => { + let _unused = (obj, body); + Err("cannot mutate originating request on a subgraph".into()) + } else { + let bytes = Bytes::from(body); + obj.with_mut(|request| request.request = bytes); + Ok(()) + } + } + } + );*/ + + $engine.register_get( + "uri", + |obj: &mut SharedMut<$base::Request>| -> Result> { + Ok(obj.with_mut(|request| request.router_request.uri().clone())) + } + ); + + $engine.register_set( + "uri", + |obj: &mut SharedMut<$base::Request>, uri: Uri| { + if_subgraph! { + $base => { + let _unused = (obj, uri); + Err("cannot mutate originating request on a subgraph".into()) + } else { + obj.with_mut(|request| *request.router_request.uri_mut() = uri); + Ok(()) + } + } + } + ); + )* + }; +} + macro_rules! register_rhai_interface { ($engine: ident, $($base: ident), *) => { $( @@ -1190,6 +1536,8 @@ impl Rhai { tracing::error!(%message, target = %error_main); }); // Add common getter/setters for different types + register_rhai_router_interface!(engine, router); + // Add common getter/setters for different types register_rhai_interface!(engine, supergraph, execution, subgraph); // Since constants in Rhai don't give us the behaviour we expect, let's create some global diff --git a/apollo-router/src/plugins/rhai/execution.rs b/apollo-router/src/plugins/rhai/execution.rs index ea9cfd0d60..a04b00211d 100644 --- a/apollo-router/src/plugins/rhai/execution.rs +++ b/apollo-router/src/plugins/rhai/execution.rs @@ -1,5 +1,65 @@ //! execution module +use std::ops::ControlFlow; + +use tower::BoxError; + +use super::ErrorDetails; +use crate::graphql::Error; pub(crate) use crate::services::execution::*; -pub(crate) type Response = super::engine::RhaiExecutionResponse; +use crate::Context; + +pub(crate) type FirstResponse = super::engine::RhaiExecutionResponse; pub(crate) type DeferredResponse = super::engine::RhaiExecutionDeferredResponse; + +pub(super) fn request_failure( + context: Context, + error_details: ErrorDetails, +) -> Result, BoxError> { + let res = if let Some(body) = error_details.body { + Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build()? + } else { + Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .context(context) + .status_code(error_details.status) + .build()? 
+ }; + + Ok(ControlFlow::Break(res)) +} + +pub(super) fn response_failure(context: Context, error_details: ErrorDetails) -> Response { + if let Some(body) = error_details.body { + Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build() + } else { + Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .status_code(error_details.status) + .context(context) + .build() + } + .expect("can't fail to build our error message") +} diff --git a/apollo-router/src/plugins/rhai/mod.rs b/apollo-router/src/plugins/rhai/mod.rs index 5dce61bccb..2f19f9cf32 100644 --- a/apollo-router/src/plugins/rhai/mod.rs +++ b/apollo-router/src/plugins/rhai/mod.rs @@ -45,22 +45,14 @@ use crate::layers::ServiceBuilderExt; use crate::plugin::Plugin; use crate::plugin::PluginInit; use crate::plugins::rhai::engine::OptionDance; -use crate::plugins::rhai::engine::RhaiExecutionDeferredResponse; -use crate::plugins::rhai::engine::RhaiExecutionResponse; -use crate::plugins::rhai::engine::RhaiSupergraphDeferredResponse; -use crate::plugins::rhai::engine::RhaiSupergraphResponse; use crate::register_plugin; -use crate::services::ExecutionRequest; -use crate::services::ExecutionResponse; -use crate::services::SupergraphRequest; -use crate::services::SupergraphResponse; -use crate::Context; mod engine; pub(crate) const RHAI_SPAN_NAME: &str = "rhai_plugin"; mod execution; +mod router; mod subgraph; mod supergraph; @@ -230,6 +222,24 @@ impl Plugin for Rhai { }) } + fn router_service(&self, service: router::BoxService) -> router::BoxService { + const FUNCTION_NAME_SERVICE: &str = "router_service"; + if !self.ast_has_function(FUNCTION_NAME_SERVICE) { + return service; + } + tracing::debug!("router_service function found"); + let shared_service = Arc::new(Mutex::new(Some(service))); + if let Err(error) = self.run_rhai_service( + FUNCTION_NAME_SERVICE, + None, + ServiceStep::Router(shared_service.clone()), + self.block.load().scope.clone(), + ) { + tracing::error!("service callback failed: {error}"); + } + shared_service.take_unwrap() + } + fn supergraph_service(&self, service: supergraph::BoxService) -> supergraph::BoxService { const FUNCTION_NAME_SERVICE: &str = "supergraph_service"; if !self.ast_has_function(FUNCTION_NAME_SERVICE) { @@ -297,6 +307,7 @@ impl Drop for Rhai { #[derive(Clone, Debug)] pub(crate) enum ServiceStep { + Router(SharedMut), Supergraph(SharedMut), Execution(SharedMut), Subgraph(SharedMut), @@ -318,57 +329,15 @@ macro_rules! gen_map_request { ServiceBuilder::new() .instrument(rhai_service_span()) .checkpoint(move |request: $base::Request| { - // Let's define a local function to build an error response - fn failure_message( - context: Context, - error_details: ErrorDetails, - ) -> Result, BoxError> - { - let res = if let Some(body) = error_details.body { - $base::Response::builder() - .extensions(body.extensions) - .errors(body.errors) - .status_code(error_details.status) - .context(context) - .and_data(body.data) - .and_label(body.label) - .and_path(body.path) - .build() - } else { - $base::Response::error_builder() - .errors(vec![Error { - message: error_details.message.unwrap_or_default(), - ..Default::default() - }]) - .context(context) - .status_code(error_details.status) - .build()? 
- }; - - Ok(ControlFlow::Break(res)) - } let shared_request = Shared::new(Mutex::new(Some(request))); - let result: Result> = if $callback.is_curried() { - $callback.call( - &$rhai_service.engine, - &$rhai_service.ast, - (shared_request.clone(),), - ) - } else { - let mut guard = $rhai_service.scope.lock().unwrap(); - $rhai_service.engine.call_fn( - &mut guard, - &$rhai_service.ast, - $callback.fn_name(), - (shared_request.clone(),), - ) - }; + let result: Result> = + execute(&$rhai_service, &$callback, (shared_request.clone(),)); if let Err(error) = result { let error_details = process_error(error); tracing::error!("map_request callback failed: {error_details:#?}"); let mut guard = shared_request.lock().unwrap(); let request_opt = guard.take(); - return failure_message(request_opt.unwrap().context, error_details); + return $base::request_failure(request_opt.unwrap().context, error_details); } let mut guard = shared_request.lock().unwrap(); let request_opt = guard.take(); @@ -381,49 +350,33 @@ macro_rules! gen_map_request { } // Actually use the checkpoint function so that we can shortcut requests which fail -macro_rules! gen_map_deferred_request { - ($request: ident, $response: ident, $borrow: ident, $rhai_service: ident, $callback: ident) => { +macro_rules! gen_map_router_deferred_request { + ($base: ident, $borrow: ident, $rhai_service: ident, $callback: ident) => { $borrow.replace(|service| { - fn rhai_service_span() -> impl Fn(&$request) -> tracing::Span + Clone { - move |_request: &$request| { + fn rhai_service_span() -> impl Fn(&$base::Request) -> tracing::Span + Clone { + move |_request: &$base::Request| { tracing::info_span!( RHAI_SPAN_NAME, - "rhai service" = stringify!($request), + "rhai service" = stringify!($base::Request), "otel.kind" = "INTERNAL" ) } } ServiceBuilder::new() .instrument(rhai_service_span()) - .checkpoint(move |request: $request| { - // Let's define a local function to build an error response - fn failure_message( - context: Context, - error_details: ErrorDetails, - ) -> Result, BoxError> { - let res = if let Some(body) = error_details.body { - $response::builder() - .extensions(body.extensions) - .errors(body.errors) - .status_code(error_details.status) - .context(context) - .and_data(body.data) - .and_label(body.label) - .and_path(body.path) - .build()? - } else { - $response::error_builder() - .errors(vec![Error { - message: error_details.message.unwrap_or_default(), - ..Default::default() - }]) - .context(context) - .status_code(error_details.status) - .build()? - }; + .checkpoint( move |chunked_request: $base::Request| { + // we split the request stream into headers+first body chunk, then a stream of chunks + // for which we will implement mapping later + let $base::Request { router_request, context } = chunked_request; + let (parts, stream) = router_request.into_parts(); - Ok(ControlFlow::Break(res)) - } + let request = $base::FirstRequest { + context, + request: http::Request::from_parts( + parts, + (), + ), + }; let shared_request = Shared::new(Mutex::new(Some(request))); let result = execute(&$rhai_service, &$callback, (shared_request.clone(),)); @@ -432,76 +385,97 @@ macro_rules! 
gen_map_deferred_request { let error_details = process_error(error); let mut guard = shared_request.lock().unwrap(); let request_opt = guard.take(); - return failure_message(request_opt.unwrap().context, error_details); + return $base::request_failure(request_opt.unwrap().context, error_details); } - let mut guard = shared_request.lock().unwrap(); - let request_opt = guard.take(); - Ok(ControlFlow::Continue(request_opt.unwrap())) + + let request_opt = shared_request.lock().unwrap().take(); + + let $base::FirstRequest { context, request } = + request_opt.unwrap(); + let (parts, _body) = http::Request::from(request).into_parts(); + + // Finally, return a response which has a Body that wraps our stream of response chunks. + Ok(ControlFlow::Continue($base::Request { + context, + router_request: http::Request::from_parts(parts, stream), + })) + + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided + let ctx = context.clone(); + let rhai_service = $rhai_service.clone(); + let callback = $callback.clone(); + + let mapped_stream = stream + .map_err(BoxError::from) + .and_then(move |chunk| { + let context = ctx.clone(); + let rhai_service = rhai_service.clone(); + let callback = callback.clone(); + async move { + let request = $base::ChunkedRequest { + context, + request: chunk.into(), + }; + let shared_request = Shared::new(Mutex::new(Some(request))); + + let result = execute( + &rhai_service, + &callback, + (shared_request.clone(),), + ); + + if let Err(error) = result { + tracing::error!("map_request callback failed: {error}"); + let error_details = process_error(error); + let error = Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }; + // We don't have a structured response to work with here. Let's + // throw away our response and custom build an error response + let error_response = graphql::Response::builder() + .errors(vec![error]).build(); + return Ok(serde_json::to_vec(&error_response)?.into()); + } + + let request_opt = shared_request.lock().unwrap().take(); + let $base::ChunkedRequest { request, .. } = + request_opt.unwrap(); + Ok(request) + } + }); + + // Finally, return a response which has a Body that wraps our stream of response chunks. + Ok(ControlFlow::Continue($base::Request { + context, + router_request: http::Request::from_parts(parts, hyper::Body::wrap_stream(mapped_stream)), + })) + */ }) .service(service) .boxed() }) }; } + macro_rules! gen_map_response { ($base: ident, $borrow: ident, $rhai_service: ident, $callback: ident) => { $borrow.replace(|service| { service .map_response(move |response: $base::Response| { - // Let's define a local function to build an error response - // XXX: This isn't ideal. We already have a response, so ideally we'd - // like to append this error into the existing response. However, - // the significantly different treatment of errors in different - // response types makes this extremely painful. This needs to be - // re-visited at some point post GA. 
- fn failure_message( - context: Context, - error_details: ErrorDetails, - ) -> $base::Response { - if let Some(body) = error_details.body { - $base::Response::builder() - .extensions(body.extensions) - .errors(body.errors) - .status_code(error_details.status) - .context(context) - .and_data(body.data) - .and_label(body.label) - .and_path(body.path) - .build() - } else { - $base::Response::error_builder() - .errors(vec![Error { - message: error_details.message.unwrap_or_default(), - ..Default::default() - }]) - .status_code(error_details.status) - .context(context) - .build() - .expect("can't fail to build our error message") - } - } let shared_response = Shared::new(Mutex::new(Some(response))); - let result: Result> = if $callback.is_curried() { - $callback.call( - &$rhai_service.engine, - &$rhai_service.ast, - (shared_response.clone(),), - ) - } else { - let mut guard = $rhai_service.scope.lock().unwrap(); - $rhai_service.engine.call_fn( - &mut guard, - &$rhai_service.ast, - $callback.fn_name(), - (shared_response.clone(),), - ) - }; + let result: Result> = + execute(&$rhai_service, &$callback, (shared_response.clone(),)); + if let Err(error) = result { tracing::error!("map_response callback failed: {error}"); let error_details = process_error(error); let mut guard = shared_response.lock().unwrap(); let response_opt = guard.take(); - return failure_message(response_opt.unwrap().context, error_details); + return $base::response_failure( + response_opt.unwrap().context, + error_details, + ); } let mut guard = shared_response.lock().unwrap(); let response_opt = guard.take(); @@ -512,46 +486,122 @@ macro_rules! gen_map_response { }; } -macro_rules! gen_map_deferred_response { - ($response: ident, $rhai_response: ident, $rhai_deferred_response: ident, $borrow: ident, $rhai_service: ident, $callback: ident) => { +// Even though this macro is only ever used to generate router service handling, I'm leaving it as +// a macro so that the code shape is "similar" to the way in which other services are processed. +// +// I can't easily unify the macros because the router response processing is quite different to +// other service in terms of payload. +macro_rules! gen_map_router_deferred_response { + ($base: ident, $borrow: ident, $rhai_service: ident, $callback: ident) => { $borrow.replace(|service| { BoxService::new(service.and_then( - |mapped_response: $response| async move { - // Let's define a local function to build an error response - // XXX: This isn't ideal. We already have a response, so ideally we'd - // like to append this error into the existing response. However, - // the significantly different treatment of errors in different - // response types makes this extremely painful. This needs to be - // re-visited at some point post GA. 
- fn failure_message( - context: Context, - error_details: ErrorDetails, - ) -> $response { - if let Some(body) = error_details.body { - $response::builder() - .extensions(body.extensions) - .errors(body.errors) - .status_code(error_details.status) - .context(context) - .and_data(body.data) - .and_label(body.label) - .and_path(body.path) - .build() - } else { - $response::error_builder() - .errors(vec![Error { + |mapped_response: $base::Response| async move { + // we split the response stream into headers+first response, then a stream of deferred responses + // for which we will implement mapping later + let $base::Response { response, context } = mapped_response; + let (parts, stream) = response.into_parts(); + + let response = $base::FirstResponse { + context, + response: http::Response::from_parts( + parts, + (), + ) + .into(), + }; + let shared_response = Shared::new(Mutex::new(Some(response))); + + let result = + execute(&$rhai_service, &$callback, (shared_response.clone(),)); + if let Err(error) = result { + tracing::error!("map_response callback failed: {error}"); + let error_details = process_error(error); + let response_opt = shared_response.lock().unwrap().take(); + return Ok($base::response_failure( + response_opt.unwrap().context, + error_details + )); + } + + let response_opt = shared_response.lock().unwrap().take(); + + let $base::FirstResponse { context, response } = + response_opt.unwrap(); + let (parts, _body) = http::Response::from(response).into_parts(); + + + // Finally, return a response which has a Body that wraps our stream of response chunks. + Ok($base::Response { + context, + response: http::Response::from_parts(parts, stream), + }) + + /*TODO: reenable when https://github.com/apollographql/router/issues/3642 is decided + let ctx = context.clone(); + + let mapped_stream = rest + .map_err(BoxError::from) + .and_then(move |deferred_response| { + let rhai_service = $rhai_service.clone(); + let context = ctx.clone(); + let callback = $callback.clone(); + async move { + let response = $base::DeferredResponse { + context, + response: deferred_response.into(), + }; + let shared_response = Shared::new(Mutex::new(Some(response))); + + let result = execute( + &rhai_service, + &callback, + (shared_response.clone(),), + ); + + if let Err(error) = result { + tracing::error!("map_response callback failed: {error}"); + let error_details = process_error(error); + let error = Error { message: error_details.message.unwrap_or_default(), ..Default::default() - }]) - .status_code(error_details.status) - .context(context) - .build() - }.expect("can't fail to build our error message") - } + }; + // We don't have a structured response to work with here. Let's + // throw away our response and custom build an error response + let error_response = graphql::Response::builder() + .errors(vec![error]).build(); + return Ok(serde_json::to_vec(&error_response)?.into()); + } + + let response_opt = shared_response.lock().unwrap().take(); + let $base::DeferredResponse { response, .. } = + response_opt.unwrap(); + Ok(response) + } + }); + + // Create our response stream which consists of the bytes from our first body chained with the + // rest of the responses in our mapped stream. + let final_stream = once(ready(Ok(body))).chain(mapped_stream).boxed(); + + // Finally, return a response which has a Body that wraps our stream of response chunks. 
+ Ok($base::Response { + context, + response: http::Response::from_parts(parts, hyper::Body::wrap_stream(final_stream)), + })*/ + }, + )) + }) + }; +} +macro_rules! gen_map_deferred_response { + ($base: ident, $borrow: ident, $rhai_service: ident, $callback: ident) => { + $borrow.replace(|service| { + BoxService::new(service.and_then( + |mapped_response: $base::Response| async move { // we split the response stream into headers+first response, then a stream of deferred responses // for which we will implement mapping later - let $response { response, context } = mapped_response; + let $base::Response { response, context } = mapped_response; let (parts, stream) = response.into_parts(); let (first, rest) = stream.into_future().await; @@ -562,13 +612,13 @@ macro_rules! gen_map_deferred_response { position: None, body: None }; - return Ok(failure_message( + return Ok($base::response_failure( context, error_details )); } - let response = $rhai_response { + let response = $base::FirstResponse { context, response: http::Response::from_parts( parts, @@ -585,7 +635,7 @@ macro_rules! gen_map_deferred_response { let error_details = process_error(error); let mut guard = shared_response.lock().unwrap(); let response_opt = guard.take(); - return Ok(failure_message( + return Ok($base::response_failure( response_opt.unwrap().context, error_details )); @@ -593,7 +643,7 @@ macro_rules! gen_map_deferred_response { let mut guard = shared_response.lock().unwrap(); let response_opt = guard.take(); - let $rhai_response { context, response } = + let $base::FirstResponse { context, response } = response_opt.unwrap(); let (parts, body) = http::Response::from(response).into_parts(); @@ -604,7 +654,7 @@ macro_rules! gen_map_deferred_response { let context = context.clone(); let callback = $callback.clone(); async move { - let response = $rhai_deferred_response { + let response = $base::DeferredResponse { context, response: deferred_response, }; @@ -620,7 +670,7 @@ macro_rules! gen_map_deferred_response { let error_details = process_error(error); let mut guard = shared_response.lock().unwrap(); let response_opt = guard.take(); - let $rhai_deferred_response { mut response, .. } = response_opt.unwrap(); + let $base::DeferredResponse { mut response, .. } = response_opt.unwrap(); let error = Error { message: error_details.message.unwrap_or_default(), ..Default::default() @@ -631,7 +681,7 @@ macro_rules! gen_map_deferred_response { let mut guard = shared_response.lock().unwrap(); let response_opt = guard.take(); - let $rhai_deferred_response { response, .. } = + let $base::DeferredResponse { response, .. } = response_opt.unwrap(); Some(response) } @@ -642,7 +692,7 @@ macro_rules! gen_map_deferred_response { once(ready(body)).chain(mapped_stream).boxed(), ) .into(); - Ok($response { + Ok($base::Response { context: ctx, response, }) @@ -655,23 +705,14 @@ macro_rules! 
gen_map_deferred_response { impl ServiceStep { fn map_request(&mut self, rhai_service: RhaiService, callback: FnPtr) { match self { + ServiceStep::Router(service) => { + gen_map_router_deferred_request!(router, service, rhai_service, callback); + } ServiceStep::Supergraph(service) => { - gen_map_deferred_request!( - SupergraphRequest, - SupergraphResponse, - service, - rhai_service, - callback - ); + gen_map_request!(supergraph, service, rhai_service, callback); } ServiceStep::Execution(service) => { - gen_map_deferred_request!( - ExecutionRequest, - ExecutionResponse, - service, - rhai_service, - callback - ); + gen_map_request!(execution, service, rhai_service, callback); } ServiceStep::Subgraph(service) => { gen_map_request!(subgraph, service, rhai_service, callback); @@ -681,25 +722,14 @@ impl ServiceStep { fn map_response(&mut self, rhai_service: RhaiService, callback: FnPtr) { match self { + ServiceStep::Router(service) => { + gen_map_router_deferred_response!(router, service, rhai_service, callback); + } ServiceStep::Supergraph(service) => { - gen_map_deferred_response!( - SupergraphResponse, - RhaiSupergraphResponse, - RhaiSupergraphDeferredResponse, - service, - rhai_service, - callback - ); + gen_map_deferred_response!(supergraph, service, rhai_service, callback); } ServiceStep::Execution(service) => { - gen_map_deferred_response!( - ExecutionResponse, - RhaiExecutionResponse, - RhaiExecutionDeferredResponse, - service, - rhai_service, - callback - ); + gen_map_deferred_response!(execution, service, rhai_service, callback); } ServiceStep::Subgraph(service) => { gen_map_response!(subgraph, service, rhai_service, callback); diff --git a/apollo-router/src/plugins/rhai/router.rs b/apollo-router/src/plugins/rhai/router.rs new file mode 100644 index 0000000000..2a6313daf2 --- /dev/null +++ b/apollo-router/src/plugins/rhai/router.rs @@ -0,0 +1,70 @@ +//! router module + +use std::ops::ControlFlow; + +use tower::BoxError; + +use super::ErrorDetails; +use crate::graphql::Error; +pub(crate) use crate::services::router::*; +use crate::Context; + +pub(crate) type FirstRequest = super::engine::RhaiRouterFirstRequest; +pub(crate) type ChunkedRequest = super::engine::RhaiRouterChunkedRequest; +pub(crate) type FirstResponse = super::engine::RhaiRouterResponse; +pub(crate) type DeferredResponse = super::engine::RhaiRouterChunkedResponse; + +pub(super) fn request_failure( + context: Context, + error_details: ErrorDetails, +) -> Result, BoxError> { + let res = if let Some(body) = error_details.body { + crate::services::router::Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build()? + } else { + crate::services::router::Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .context(context) + .status_code(error_details.status) + .build()? 
+ }; + + Ok(ControlFlow::Break(res)) +} + +pub(super) fn response_failure( + context: Context, + error_details: ErrorDetails, +) -> crate::services::router::Response { + if let Some(body) = error_details.body { + crate::services::router::Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build() + } else { + crate::services::router::Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .status_code(error_details.status) + .context(context) + .build() + } + .expect("can't fail to build our error message") +} diff --git a/apollo-router/src/plugins/rhai/subgraph.rs b/apollo-router/src/plugins/rhai/subgraph.rs index 22da7cc63b..110dce38d5 100644 --- a/apollo-router/src/plugins/rhai/subgraph.rs +++ b/apollo-router/src/plugins/rhai/subgraph.rs @@ -1,3 +1,62 @@ //! subgraph module +use std::ops::ControlFlow; + +use tower::BoxError; + +use super::ErrorDetails; +use crate::graphql::Error; pub(crate) use crate::services::subgraph::*; +use crate::Context; + +pub(super) fn request_failure( + context: Context, + error_details: ErrorDetails, +) -> Result, BoxError> { + let res = if let Some(body) = error_details.body { + Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build() + } else { + Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .context(context) + .status_code(error_details.status) + .build()? + }; + + Ok(ControlFlow::Break(res)) +} + +pub(super) fn response_failure(context: Context, error_details: ErrorDetails) -> Response { + if let Some(body) = error_details.body { + Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build() + } else { + Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .status_code(error_details.status) + .context(context) + .build() + .expect("can't fail to build our error message") + } +} diff --git a/apollo-router/src/plugins/rhai/supergraph.rs b/apollo-router/src/plugins/rhai/supergraph.rs index 01cc448b96..9f2905ab62 100644 --- a/apollo-router/src/plugins/rhai/supergraph.rs +++ b/apollo-router/src/plugins/rhai/supergraph.rs @@ -1,5 +1,65 @@ //! supergraph module +use std::ops::ControlFlow; + +use tower::BoxError; + +use super::ErrorDetails; +use crate::graphql::Error; pub(crate) use crate::services::supergraph::*; -pub(crate) type Response = super::engine::RhaiSupergraphResponse; +use crate::Context; + +pub(crate) type FirstResponse = super::engine::RhaiSupergraphResponse; pub(crate) type DeferredResponse = super::engine::RhaiSupergraphDeferredResponse; + +pub(super) fn request_failure( + context: Context, + error_details: ErrorDetails, +) -> Result, BoxError> { + let res = if let Some(body) = error_details.body { + Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build()? 
+ } else { + Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .context(context) + .status_code(error_details.status) + .build()? + }; + + Ok(ControlFlow::Break(res)) +} + +pub(super) fn response_failure(context: Context, error_details: ErrorDetails) -> Response { + if let Some(body) = error_details.body { + Response::builder() + .extensions(body.extensions) + .errors(body.errors) + .status_code(error_details.status) + .context(context) + .and_data(body.data) + .and_label(body.label) + .and_path(body.path) + .build() + } else { + Response::error_builder() + .errors(vec![Error { + message: error_details.message.unwrap_or_default(), + ..Default::default() + }]) + .status_code(error_details.status) + .context(context) + .build() + } + .expect("can't fail to build our error message") +} diff --git a/apollo-router/src/plugins/rhai/tests.rs b/apollo-router/src/plugins/rhai/tests.rs index a2e5fee18e..bb56ceb654 100644 --- a/apollo-router/src/plugins/rhai/tests.rs +++ b/apollo-router/src/plugins/rhai/tests.rs @@ -21,16 +21,16 @@ use super::process_error; use super::subgraph; use super::PathBuf; use super::Rhai; -use super::RhaiExecutionDeferredResponse; -use super::RhaiExecutionResponse; -use super::RhaiSupergraphDeferredResponse; -use super::RhaiSupergraphResponse; use crate::graphql::Error; use crate::graphql::Request; use crate::http_ext; use crate::plugin::test::MockExecutionService; use crate::plugin::test::MockSupergraphService; use crate::plugin::DynPlugin; +use crate::plugins::rhai::engine::RhaiExecutionDeferredResponse; +use crate::plugins::rhai::engine::RhaiExecutionResponse; +use crate::plugins::rhai::engine::RhaiSupergraphDeferredResponse; +use crate::plugins::rhai::engine::RhaiSupergraphResponse; use crate::services::ExecutionRequest; use crate::services::SubgraphRequest; use crate::services::SupergraphRequest; diff --git a/apollo-router/tests/fixtures/test_callbacks.rhai b/apollo-router/tests/fixtures/test_callbacks.rhai index 340cde9d5e..7ded5601d9 100644 --- a/apollo-router/tests/fixtures/test_callbacks.rhai +++ b/apollo-router/tests/fixtures/test_callbacks.rhai @@ -1,7 +1,7 @@ // This is a test used to make sure each callback is called -fn supergraph_service(service) { - log_info("supergraph_service setup"); +fn router_service(service) { + log_info("router_service setup"); service.map_request(|request| { log_info("from_router_request"); }); @@ -10,6 +10,16 @@ fn supergraph_service(service) { }); } +fn supergraph_service(service) { + log_info("supergraph_service setup"); + service.map_request(|request| { + log_info("from_supergraph_request"); + }); + service.map_response(|response| { + log_info("from_supergraph_response"); + }); +} + fn execution_service(service) { log_info("execution_service setup"); service.map_request(|request| { diff --git a/apollo-router/tests/rhai_tests.rs b/apollo-router/tests/rhai_tests.rs index e1578ae423..d8ff1aecf9 100644 --- a/apollo-router/tests/rhai_tests.rs +++ b/apollo-router/tests/rhai_tests.rs @@ -46,9 +46,12 @@ async fn all_rhai_callbacks_are_invoked() { .unwrap(); dbg!(_response); for expected_log in [ - "supergraph_service setup", + "router_service setup", "from_router_request", "from_router_response", + "supergraph_service setup", + "from_supergraph_request", + "from_supergraph_response", "execution_service setup", "from_execution_request", "from_execution_response", diff --git a/docs/source/customizations/rhai-api.mdx b/docs/source/customizations/rhai-api.mdx 
index 5496a24da9..83775e96e5 100644 --- a/docs/source/customizations/rhai-api.mdx +++ b/docs/source/customizations/rhai-api.mdx @@ -12,6 +12,7 @@ This article documents symbols and behaviors that are specific to [Rhai customiz Your Rhai script's [main file](./rhai/#the-main-file) hooks into the individual services of the Apollo Router's [request-handling lifecycle](./rhai/#router-request-lifecycle). To do so, it defines whichever combination of the following entry point hooks it requires: ```rhai +fn router_service(service) {} fn supergraph_service(service) {} fn execution_service(service) {} fn subgraph_service(service, subgraph) {} @@ -192,6 +193,31 @@ fn supergraph_service(service) { } ``` +## json encode/decode strings + +Your Rhai customization can use the functions `json_encode()` and `json_decode()` to convert Rhai objects to/from valid JSON encoded strings. Both functions can fail, so always handle exceptions when using them. + +```rhai +fn router_service(service) { + let original = `{"valid":"object"}`; + try { + let encoded = json_decode(original); + // encoded is a Rhai object, with a property (or key) named valid with a String value of "object" + print(`encoded.valid: ${encoded.valid}`); + let and_back = json_encode(encoded); + // and_back will be a string == original. + if and_back != original { + throw "something has gone wrong"; + } + } + catch(err) + { + // log any errors + log_error(`json coding error: ${err}`); + } +} +``` + ## base64 encode/decode strings Your Rhai customization can use the functions `base64::encode()` and `base64::decode()` to encode/decode strings. `encode()` does not fail, but `decode()` can fail, so always handle exceptions when using the `decode()` function. @@ -464,9 +490,10 @@ The following fields are identical in behavior to their `request` counterparts: * [`context`](#requestcontext) * [`headers`](#requestheaders) +* [`body`](#requestbody) * [`body.extensions`](#requestbodyextensions) -Note: Be particularly careful when interacting with headers in a response context. For supergraph_service() and execution_service(), response headers only exist for the first response in a deferred response stream. You can handle this by making use of the `is_primary()` function which will return true if a response is the first (or primary) response. If you do try to access the headers in a non-primary response, then you'll raise an exception which can be handled like any other rhai exception, but is not so convenient as using the `is_primary()` method. +Note: Be particularly careful when interacting with headers in a response context. For router_service(), supergraph_service() and execution_service(), response headers only exist for the first response in a deferred response stream. You can handle this by making use of the `is_primary()` function which will return true if a response is the first (or primary) response. If you do try to access the headers in a non-primary response, then you'll raise an exception which can be handled like any other rhai exception, but is not so convenient as using the `is_primary()` method. 
```rhai if response.is_primary() { diff --git a/docs/source/customizations/rhai.mdx b/docs/source/customizations/rhai.mdx index 157c58a57a..14fe4e62ba 100644 --- a/docs/source/customizations/rhai.mdx +++ b/docs/source/customizations/rhai.mdx @@ -263,8 +263,9 @@ Each hook in your Rhai script's [main file](#main-file) is passed a `service` ob ```mermaid graph LR; client(Client); - client --> supergraph(supergraph_service); + client --> router(router_service); subgraph Apollo Router + router --> supergraph(supergraph_service); supergraph --> execution(execution_service); execution --> subs_a(subgraph_service); execution --> subs_b(subgraph_service); @@ -286,8 +287,9 @@ Each hook in your Rhai script's [main file](#main-file) is passed a `service` ob graph RL; client(Client); subgraph Apollo Router - supergraph(supergraph_service); execution(execution_service); + supergraph(supergraph_service); + router(router_service); subs_a(subgraph_service); subs_b(subgraph_service); end; @@ -298,13 +300,14 @@ Each hook in your Rhai script's [main file](#main-file) is passed a `service` ob subs_a --> execution; subs_b --> execution; execution --> supergraph; - supergraph --> client; + supergraph --> router;; + router --> client; class client,sub_a,sub_b secondary; ``` First, callbacks for `subgraph_service` are each passed the `response` from the corresponding subgraph. - Afterward, callbacks for `execution_service` and then `supergraph_service` are passed the combined `response` for the client that's assembled from all subgraph `response`s. + Afterward, callbacks for `execution_service`, `supergraph_service` and then `router_service` are passed the combined `response` for the client that's assembled from all subgraph `response`s. From e060463626004d2446ca08e6792b35d15f467922 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 5 Sep 2023 14:30:37 +0200 Subject: [PATCH 46/81] Do not record a trace if telemetry is not configured (#2999) This introduces a `SamplingFilter` that wraps `OpenTelemetryLayer`. The layer has an overhead on every request, because it records data for each span, even if no exporters are set up. The filter handles sampling ahead of the layer, only sending a trace to the layer when it is actually needed, ie when it is sampled, and an exporter was configured. This also reduces the overhead of sampling, by managing it outside of the OpenTelemetryLayer. 
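To make the mechanism concrete, here is a deliberately simplified sketch of the sampling decision (this is not the code added by this patch; `SPAN_SAMPLING_RATE`, `configure` and `sample` are illustrative names only):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// Illustrative stand-in for the plugin's state: an f64 ratio stored as its raw
// bit pattern so it can be swapped atomically when the configuration reloads.
static SPAN_SAMPLING_RATE: AtomicU64 = AtomicU64::new(0);

/// Store a new sampling ratio, clamped to [0.0, 1.0].
fn configure(ratio: f64) {
    SPAN_SAMPLING_RATE.store(ratio.clamp(0.0, 1.0).to_bits(), Ordering::Relaxed);
}

/// Decide whether a new root span should be sampled, given a random draw from
/// [0.0, 1.0] (e.g. `rand::thread_rng().gen_range(0.0..=1.0)`).
fn sample(random: f64) -> bool {
    random <= f64::from_bits(SPAN_SAMPLING_RATE.load(Ordering::Relaxed))
}

fn main() {
    configure(0.5);
    assert!(sample(0.25)); // sampled
    assert!(!sample(0.75)); // dropped before the OpenTelemetry layer sees it
}
```

Storing the ratio as bits in an atomic avoids taking a lock on the request hot path when the configuration changes.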
It is configured through a sampling ratio stored in an atomic u64 that is modified when the telemetry configuration is activated.
---
 .changesets/fix_geal_filter_events_too.md     |   6 +
 Cargo.lock                                    |   1 +
 apollo-router/Cargo.toml                      |   1 +
 apollo-router/src/plugins/telemetry/mod.rs    |  72 ++++++----
 apollo-router/src/plugins/telemetry/reload.rs | 136 ++++++++++++++++--
 apollo-router/src/router_factory.rs           |   6 +-
 .../fixtures/jaeger-0.5-sample.router.yaml    |  29 ++++
 apollo-router/tests/jaeger_test.rs            |  28 ++++
 8 files changed, 236 insertions(+), 43 deletions(-)
 create mode 100644 .changesets/fix_geal_filter_events_too.md
 create mode 100644 apollo-router/tests/fixtures/jaeger-0.5-sample.router.yaml

diff --git a/.changesets/fix_geal_filter_events_too.md b/.changesets/fix_geal_filter_events_too.md
new file mode 100644
index 0000000000..579d457ea6
--- /dev/null
+++ b/.changesets/fix_geal_filter_events_too.md
@@ -0,0 +1,6 @@
+### Do not record a trace if telemetry is not configured
+
+The OpenTelemetry handling code had a constant overhead on every request, due to the OpenTelemetryLayer recording data for every span, even when telemetry is not actually set up. We introduce a sampling filter that disables it entirely when no exporters are configured, which provides a performance boost in basic setups.
+It also provides performance gains when exporters are set up: if a sampling ratio or client-defined sampling is used, then the filter will only send the sampled traces to the rest of the stack, thus reducing the overhead again.
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2999
diff --git a/Cargo.lock b/Cargo.lock
index 705e12df56..fb568ac1a0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -339,6 +339,7 @@ dependencies = [
  "opentelemetry-prometheus",
  "opentelemetry-semantic-conventions",
  "opentelemetry-zipkin",
+ "opentelemetry_api",
  "p256 0.12.0",
  "parking_lot 0.12.1",
  "paste",
diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml
index c46cefb639..ab458424d5 100644
--- a/apollo-router/Cargo.toml
+++ b/apollo-router/Cargo.toml
@@ -134,6 +134,7 @@ once_cell = "1.18.0"
 # https://github.com/apollographql/router/pull/1509. A comment which exists
 # there (and on `tracing` packages below) should be updated should this change.
opentelemetry = { version = "0.19.0", features = ["rt-tokio", "metrics"] } +opentelemetry_api = "0.19.0" opentelemetry-datadog = { version = "0.7.0", features = ["reqwest-client"] } opentelemetry-http = "0.8.0" opentelemetry-jaeger = { version = "0.18.0", features = [ diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index f6a8ba5202..c96157e4e4 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -48,7 +48,6 @@ use tokio::runtime::Handle; use tower::BoxError; use tower::ServiceBuilder; use tower::ServiceExt; -use tracing_opentelemetry::OpenTelemetryLayer; use tracing_opentelemetry::OpenTelemetrySpanExt; use tracing_subscriber::fmt::format::JsonFields; use tracing_subscriber::Layer; @@ -60,17 +59,19 @@ use self::apollo::SingleReport; use self::apollo_exporter::proto; use self::apollo_exporter::Sender; use self::config::Conf; +use self::config::Sampler; +use self::config::SamplerOption; use self::formatters::text::TextFormatter; use self::metrics::apollo::studio::SingleTypeStat; use self::metrics::AttributesForwardConf; use self::metrics::MetricsAttributesConf; use self::reload::reload_fmt; use self::reload::reload_metrics; -use self::reload::LayeredRegistry; +use self::reload::LayeredTracer; use self::reload::NullFieldFormatter; +use self::reload::SamplingFilter; use self::reload::OPENTELEMETRY_TRACER_HANDLE; use self::tracing::apollo_telemetry::APOLLO_PRIVATE_DURATION_NS; -use self::tracing::reload::ReloadTracer; use crate::axum_factory::utils::REQUEST_SPAN_NAME; use crate::context::OPERATION_NAME; use crate::layers::ServiceBuilderExt; @@ -104,6 +105,7 @@ use crate::plugins::telemetry::utils::TracingUtils; use crate::query_planner::OperationKind; use crate::register_plugin; use crate::router_factory::Endpoint; +use crate::services::apollo_key; use crate::services::execution; use crate::services::router; use crate::services::subgraph; @@ -156,6 +158,7 @@ pub(crate) struct Telemetry { custom_endpoints: MultiMap, apollo_metrics_sender: apollo_exporter::Sender, field_level_instrumentation_ratio: f64, + sampling_filter_ratio: SamplerOption, tracer_provider: Option, meter_provider: AggregateMeterProvider, @@ -241,14 +244,16 @@ impl Plugin for Telemetry { config.calculate_field_level_instrumentation_ratio()?; let mut metrics_builder = Self::create_metrics_builder(&config)?; let meter_provider = metrics_builder.meter_provider(); + let (sampling_filter_ratio, tracer_provider) = Self::create_tracer_provider(&config)?; Ok(Telemetry { custom_endpoints: metrics_builder.custom_endpoints(), metrics_exporters: metrics_builder.exporters(), metrics: BasicMetrics::new(&meter_provider), apollo_metrics_sender: metrics_builder.apollo_metrics_provider(), field_level_instrumentation_ratio, - tracer_provider: Some(Self::create_tracer_provider(&config)?), + tracer_provider: Some(tracer_provider), meter_provider, + sampling_filter_ratio, config: Arc::new(config), }) } @@ -541,6 +546,8 @@ impl Telemetry { // Only apply things if we were executing in the context of a vanilla the Apollo executable. // Users that are rolling their own routers will need to set up telemetry themselves. if let Some(hot_tracer) = OPENTELEMETRY_TRACER_HANDLE.get() { + SamplingFilter::configure(&self.sampling_filter_ratio); + // The reason that this has to happen here is that we are interacting with global state. 
// If we do this logic during plugin init then if a subsequent plugin fails to init then we // will already have set the new tracer provider and we will be in an inconsistent state. @@ -612,20 +619,41 @@ impl Telemetry { fn create_tracer_provider( config: &config::Conf, - ) -> Result { + ) -> Result<(SamplerOption, opentelemetry::sdk::trace::TracerProvider), BoxError> { let tracing_config = config.tracing.clone().unwrap_or_default(); - let trace_config = &tracing_config.trace_config.unwrap_or_default(); - let mut builder = - opentelemetry::sdk::trace::TracerProvider::builder().with_config(trace_config.into()); + let mut trace_config = tracing_config.trace_config.unwrap_or_default(); + let mut sampler = trace_config.sampler; + // set it to AlwaysOn: it is now done in the SamplingFilter, so whatever is sent to an exporter + // should be accepted + trace_config.sampler = SamplerOption::Always(Sampler::AlwaysOn); + + // if APOLLO_KEY was set, the Studio exporter must be active + let apollo_config = if config.apollo.is_none() && apollo_key().is_some() { + Some(Default::default()) + } else { + config.apollo.clone() + }; + + let mut builder = opentelemetry::sdk::trace::TracerProvider::builder() + .with_config((&trace_config).into()); - builder = setup_tracing(builder, &tracing_config.jaeger, trace_config)?; - builder = setup_tracing(builder, &tracing_config.zipkin, trace_config)?; - builder = setup_tracing(builder, &tracing_config.datadog, trace_config)?; - builder = setup_tracing(builder, &tracing_config.otlp, trace_config)?; - builder = setup_tracing(builder, &config.apollo, trace_config)?; + builder = setup_tracing(builder, &tracing_config.jaeger, &trace_config)?; + builder = setup_tracing(builder, &tracing_config.zipkin, &trace_config)?; + builder = setup_tracing(builder, &tracing_config.datadog, &trace_config)?; + builder = setup_tracing(builder, &tracing_config.otlp, &trace_config)?; + builder = setup_tracing(builder, &apollo_config, &trace_config)?; + + if tracing_config.jaeger.is_none() + && tracing_config.zipkin.is_none() + && tracing_config.datadog.is_none() + && tracing_config.otlp.is_none() + && apollo_config.is_none() + { + sampler = SamplerOption::Always(Sampler::AlwaysOff); + } let tracer_provider = builder.build(); - Ok(tracer_provider) + Ok((sampler, tracer_provider)) } fn create_metrics_builder(config: &config::Conf) -> Result { @@ -664,21 +692,7 @@ impl Telemetry { Ok(builder) } - #[allow(clippy::type_complexity)] - fn create_fmt_layer( - config: &config::Conf, - ) -> Box< - dyn Layer< - ::tracing_subscriber::layer::Layered< - OpenTelemetryLayer< - LayeredRegistry, - ReloadTracer<::opentelemetry::sdk::trace::Tracer>, - >, - LayeredRegistry, - >, - > + Send - + Sync, - > { + fn create_fmt_layer(config: &config::Conf) -> Box + Send + Sync> { let logging = &config.logging; let fmt = match logging.format { config::LoggingFormat::Pretty => tracing_subscriber::fmt::layer() diff --git a/apollo-router/src/plugins/telemetry/reload.rs b/apollo-router/src/plugins/telemetry/reload.rs index 50ce48747a..064763405d 100644 --- a/apollo-router/src/plugins/telemetry/reload.rs +++ b/apollo-router/src/plugins/telemetry/reload.rs @@ -1,21 +1,33 @@ +use std::sync::atomic::AtomicU64; +use std::sync::atomic::Ordering; + use anyhow::anyhow; use anyhow::Result; use once_cell::sync::OnceCell; use opentelemetry::metrics::noop::NoopMeterProvider; use opentelemetry::sdk::trace::Tracer; +use opentelemetry::trace::TraceContextExt; use opentelemetry::trace::TracerProvider; +use rand::thread_rng; +use 
rand::Rng; use tower::BoxError; +use tracing_core::Subscriber; use tracing_opentelemetry::OpenTelemetryLayer; +use tracing_subscriber::filter::Filtered; use tracing_subscriber::fmt::FormatFields; +use tracing_subscriber::layer::Filter; use tracing_subscriber::layer::Layer; use tracing_subscriber::layer::Layered; use tracing_subscriber::layer::SubscriberExt; +use tracing_subscriber::registry::LookupSpan; use tracing_subscriber::reload::Handle; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; use tracing_subscriber::Registry; +use super::config::SamplerOption; use super::metrics::span_metrics_exporter::SpanMetricsLayer; +use crate::axum_factory::utils::REQUEST_SPAN_NAME; use crate::plugins::telemetry::formatters::filter_metric_events; use crate::plugins::telemetry::formatters::text::TextFormatter; use crate::plugins::telemetry::formatters::FilteringFormatter; @@ -25,8 +37,14 @@ use crate::plugins::telemetry::tracing::reload::ReloadTracer; pub(crate) type LayeredRegistry = Layered; -type LayeredTracer = - Layered>, LayeredRegistry>; +pub(super) type LayeredTracer = Layered< + Filtered< + OpenTelemetryLayer>, + SamplingFilter, + LayeredRegistry, + >, + LayeredRegistry, +>; // These handles allow hot tracing of layers. They have complex type definitions because tracing has // generic types in the layer definition. @@ -52,11 +70,15 @@ static FMT_LAYER_HANDLE: OnceCell< Handle + Send + Sync>, LayeredTracer>, > = OnceCell::new(); +pub(super) static SPAN_SAMPLING_RATE: AtomicU64 = AtomicU64::new(0); + pub(crate) fn init_telemetry(log_level: &str) -> Result<()> { let hot_tracer = ReloadTracer::new( opentelemetry::sdk::trace::TracerProvider::default().versioned_tracer("noop", None, None), ); - let opentelemetry_layer = tracing_opentelemetry::layer().with_tracer(hot_tracer.clone()); + let opentelemetry_layer = tracing_opentelemetry::layer() + .with_tracer(hot_tracer.clone()) + .with_filter(SamplingFilter::new()); // We choose json or plain based on tty let fmt = if atty::is(atty::Stream::Stdout) { @@ -130,20 +152,110 @@ pub(super) fn reload_metrics(layer: MetricsLayer) { } } -#[allow(clippy::type_complexity)] -pub(super) fn reload_fmt( - layer: Box< - dyn Layer< - Layered>, LayeredRegistry>, - > + Send - + Sync, - >, -) { +pub(super) fn reload_fmt(layer: Box + Send + Sync>) { if let Some(handle) = FMT_LAYER_HANDLE.get() { handle.reload(layer).expect("fmt layer reload must succeed"); } } +pub(crate) struct SamplingFilter {} + +#[allow(dead_code)] +impl SamplingFilter { + pub(crate) fn new() -> Self { + Self {} + } + + pub(super) fn configure(sampler: &SamplerOption) { + let ratio = match sampler { + SamplerOption::TraceIdRatioBased(ratio) => { + // can't use std::cmp::min because f64 is not Ord + if *ratio > 1.0 { + 1.0 + } else { + *ratio + } + } + SamplerOption::Always(s) => match s { + super::config::Sampler::AlwaysOn => 1f64, + super::config::Sampler::AlwaysOff => 0f64, + }, + }; + + SPAN_SAMPLING_RATE.store(f64::to_bits(ratio), Ordering::Relaxed); + } + + fn sample(&self) -> bool { + let s: f64 = thread_rng().gen_range(0.0..=1.0); + s <= f64::from_bits(SPAN_SAMPLING_RATE.load(Ordering::Relaxed)) + } +} + +impl Filter for SamplingFilter +where + S: Subscriber + for<'span> LookupSpan<'span>, +{ + fn enabled( + &self, + meta: &tracing::Metadata<'_>, + cx: &tracing_subscriber::layer::Context<'_, S>, + ) -> bool { + // we ignore events + if !meta.is_span() { + return false; + } + + // if there's an exsting otel context set by the client request, and it is sampled, + 
// then that trace is sampled + let current_otel_context = opentelemetry_api::Context::current(); + if current_otel_context.span().span_context().is_sampled() { + return true; + } + + let current_span = cx.current_span(); + if let Some(spanref) = current_span + // the current span, which is the parent of the span that might get enabled here, + // exists, but it might have been enabled by another layer like metrics + .id() + .and_then(|id| cx.span(id)) + { + // if this extension is set, that means the parent span was accepted, and so the + // entire trace is accepted + let extensions = spanref.extensions(); + return extensions.get::().is_some(); + } + + // we only make the sampling decision on the root span. If we reach here for any other span, + // it means that the parent span was not enabled, so we should not enable this span either + if meta.name() != REQUEST_SPAN_NAME { + return false; + } + + // - there's no parent span (it's the root), so we make the sampling decision + self.sample() + } + + fn on_new_span( + &self, + _attrs: &tracing_core::span::Attributes<'_>, + id: &tracing_core::span::Id, + ctx: tracing_subscriber::layer::Context<'_, S>, + ) { + let span = ctx.span(id).expect("Span not found, this is a bug"); + let mut extensions = span.extensions_mut(); + if extensions.get_mut::().is_none() { + extensions.insert(SampledSpan); + } + } + + fn on_close(&self, id: tracing_core::span::Id, ctx: tracing_subscriber::layer::Context<'_, S>) { + let span = ctx.span(&id).expect("Span not found, this is a bug"); + let mut extensions = span.extensions_mut(); + extensions.remove::(); + } +} + +struct SampledSpan; /// prevents span fields from being formatted to a string when writing logs pub(crate) struct NullFieldFormatter; diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs index 6236d106d2..40f14b5155 100644 --- a/apollo-router/src/router_factory.rs +++ b/apollo-router/src/router_factory.rs @@ -573,9 +573,11 @@ pub(crate) async fn create_plugins( fn inject_schema_id(schema: &Schema, configuration: &mut Value) { if configuration.get("apollo").is_none() { + /*FIXME: do we really need to set a default configuration for telemetry.apollo ? 
if let Some(telemetry) = configuration.as_object_mut() { telemetry.insert("apollo".to_string(), Value::Object(Default::default())); - } + }*/ + return; } if let (Some(schema_id), Some(apollo)) = ( &schema.api_schema().schema_id, @@ -730,7 +732,7 @@ mod test { fn test_inject_schema_id() { let schema = include_str!("testdata/starstuff@current.graphql"); let schema = Schema::parse_test(schema, &Default::default()).unwrap(); - let mut config = json!({}); + let mut config = json!({ "apollo": {} }); inject_schema_id(&schema, &mut config); let config = serde_json::from_value::(config).unwrap(); diff --git a/apollo-router/tests/fixtures/jaeger-0.5-sample.router.yaml b/apollo-router/tests/fixtures/jaeger-0.5-sample.router.yaml new file mode 100644 index 0000000000..a5a11b2829 --- /dev/null +++ b/apollo-router/tests/fixtures/jaeger-0.5-sample.router.yaml @@ -0,0 +1,29 @@ +telemetry: + tracing: + experimental_response_trace_id: + enabled: true + header_name: apollo-custom-trace-id + propagation: + jaeger: true + trace_config: + service_name: router + sampler: 0.5 + jaeger: + batch_processor: + scheduled_delay: 100ms + agent: + endpoint: default + experimental_logging: + when_header: + - name: apollo-router-log-request + value: test + headers: true # default: false + body: true # default: false + # log request for all requests coming from Iphones + - name: custom-header + match: ^foo.* + headers: true +override_subgraph_url: + products: http://localhost:4005 +include_subgraph_errors: + all: true diff --git a/apollo-router/tests/jaeger_test.rs b/apollo-router/tests/jaeger_test.rs index f1aef966dc..eb1a10f026 100644 --- a/apollo-router/tests/jaeger_test.rs +++ b/apollo-router/tests/jaeger_test.rs @@ -122,6 +122,34 @@ async fn test_local_root_no_sample() -> Result<(), BoxError> { Ok(()) } +#[tokio::test(flavor = "multi_thread")] +async fn test_local_root_50_percent_sample() -> Result<(), BoxError> { + let mut router = IntegrationTest::builder() + .telemetry(Telemetry::Jaeger) + .config(include_str!("fixtures/jaeger-0.5-sample.router.yaml")) + .build() + .await; + + router.start().await; + router.assert_started().await; + let query = json!({"query":"query ExampleQuery {topProducts{name}}\n","variables":{}, "operationName": "ExampleQuery"}); + + for _ in 0..100 { + let (id, result) = router.execute_untraced_query(&query).await; + + if result.headers().get("apollo-custom-trace-id").is_some() + && validate_trace(id, &query, Some("ExampleQuery"), &["router", "products"]) + .await + .is_ok() + { + router.graceful_shutdown().await; + + return Ok(()); + } + } + panic!("tried 100 requests with telemetry sampled at 50%, no traces were found") +} + #[tokio::test(flavor = "multi_thread")] #[ignore] async fn test_no_telemetry() -> Result<(), BoxError> { From 6d7f53832db497a5b3e6345eec3232739d465258 Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Tue, 5 Sep 2023 14:33:36 +0200 Subject: [PATCH 47/81] fix(subscription): add x-accel-buffering header for multipart response (#3749) Set `x-accel-buffering` to `no` when it's a multipart response because proxies need this configuration. 
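As a minimal illustration (a sketch using the `http` crate; `mark_unbuffered` is a made-up helper name rather than the router's actual code path), the idea is to add the header to streaming responses so that proxies such as nginx, which enable `proxy_buffering` by default, pass the multipart chunks through instead of holding them back:

```rust
use http::header::HeaderName;
use http::{HeaderValue, Response};

// Mark a streaming (multipart) response so intermediaries do not buffer it.
fn mark_unbuffered<B>(mut response: Response<B>) -> Response<B> {
    response.headers_mut().insert(
        HeaderName::from_static("x-accel-buffering"),
        HeaderValue::from_static("no"),
    );
    response
}

fn main() {
    let response = mark_unbuffered(Response::new("...multipart body..."));
    assert_eq!(
        response.headers().get("x-accel-buffering"),
        Some(&HeaderValue::from_static("no"))
    );
}
```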
Fixes #3683 --------- Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- .changesets/fix_bnjjj_fix_3683.md | 5 +++++ apollo-router/src/services/router_service.rs | 7 ++++++- apollo-router/tests/subscription_load_test.rs | 5 +++++ 3 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 .changesets/fix_bnjjj_fix_3683.md diff --git a/.changesets/fix_bnjjj_fix_3683.md b/.changesets/fix_bnjjj_fix_3683.md new file mode 100644 index 0000000000..ff130ff6f4 --- /dev/null +++ b/.changesets/fix_bnjjj_fix_3683.md @@ -0,0 +1,5 @@ +### fix(subscription): add x-accel-buffering header for multipart response ([Issue #3683](https://github.com/apollographql/router/issues/3683)) + +Set `x-accel-buffering` to `no` when it's a multipart response because proxies need this configuration. + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/3749 diff --git a/apollo-router/src/services/router_service.rs b/apollo-router/src/services/router_service.rs index 394f95fcbc..25c27a8b25 100644 --- a/apollo-router/src/services/router_service.rs +++ b/apollo-router/src/services/router_service.rs @@ -13,6 +13,7 @@ use futures::stream::StreamExt; use http::header::CONTENT_TYPE; use http::header::VARY; use http::HeaderMap; +use http::HeaderName; use http::HeaderValue; use http::Method; use http::StatusCode; @@ -306,6 +307,11 @@ impl RouterService { HeaderValue::from_static(MULTIPART_SUBSCRIPTION_CONTENT_TYPE), ); } + // Useful when you're using a proxy like nginx which enable proxy_buffering by default (http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_buffering) + parts.headers.insert( + HeaderName::from_static("x-accel-buffering"), + HeaderValue::from_static("no"), + ); let multipart_stream = match response.subscribed { Some(true) => { StreamBody::new(Multipart::new(body, ProtocolMode::Subscription)) @@ -315,7 +321,6 @@ impl RouterService { ProtocolMode::Defer, )), }; - let response = (parts, multipart_stream).into_response().map(|body| { // Axum makes this `body` have type: // https://docs.rs/http-body/0.4.5/http_body/combinators/struct.UnsyncBoxBody.html diff --git a/apollo-router/tests/subscription_load_test.rs b/apollo-router/tests/subscription_load_test.rs index 7ffbef9da0..d8e8f3d3bc 100644 --- a/apollo-router/tests/subscription_load_test.rs +++ b/apollo-router/tests/subscription_load_test.rs @@ -1,5 +1,6 @@ //! This file is to load test subscriptions and should be launched manually, not in our CI use futures::StreamExt; +use http::HeaderValue; use serde_json::json; use tower::BoxError; @@ -22,6 +23,10 @@ async fn test_subscription_load() -> Result<(), BoxError> { for i in 0..1000000i64 { let (_, response) = router.run_subscription(UNFEDERATED_SUB_QUERY).await; assert!(response.status().is_success()); + assert_eq!( + response.headers().get("x-accel-buffering").unwrap(), + &HeaderValue::from_static("no") + ); tokio::spawn(async move { let mut stream = response.bytes_stream(); From aef60514bb4abd45d9662ff6ca5d8d5054509998 Mon Sep 17 00:00:00 2001 From: Simon Sapin Date: Tue, 5 Sep 2023 14:36:31 +0200 Subject: [PATCH 48/81] Upgrade webpki and rustls-webpki crates (#3728) Brings fixes for: * https://rustsec.org/advisories/RUSTSEC-2023-0052 * https://rustsec.org/advisories/RUSTSEC-2023-0053 Fix #3645 Because Apollo Router does not accept client certificates, it could only be affected if a subgraph supplied a pathological TLS server certificate. 
--------- Co-authored-by: Geoffroy Couprie --- .changesets/maint_simon_pkiup.md | 11 +++++++++++ Cargo.lock | 4 ++-- deny.toml | 5 ++--- xtask/Cargo.lock | 8 ++++---- 4 files changed, 19 insertions(+), 9 deletions(-) create mode 100644 .changesets/maint_simon_pkiup.md diff --git a/.changesets/maint_simon_pkiup.md b/.changesets/maint_simon_pkiup.md new file mode 100644 index 0000000000..8e606d8631 --- /dev/null +++ b/.changesets/maint_simon_pkiup.md @@ -0,0 +1,11 @@ +### Upgrade webpki and rustls-webpki crates ([PR #3728](https://github.com/apollographql/router/pull/3728)) + +Brings fixes for: + +* https://rustsec.org/advisories/RUSTSEC-2023-0052 +* https://rustsec.org/advisories/RUSTSEC-2023-0053 + +Because Apollo Router does not accept client certificates, it could only be affected +if a subgraph supplied a pathological TLS server certificate. + +By [@SimonSapin](https://github.com/SimonSapin) in https://github.com/apollographql/router/pull/3728 diff --git a/Cargo.lock b/Cargo.lock index fb568ac1a0..cbcbc5699d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7200,9 +7200,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" dependencies = [ "ring", "untrusted", diff --git a/deny.toml b/deny.toml index 084bb9a883..45dc48653d 100644 --- a/deny.toml +++ b/deny.toml @@ -26,9 +26,8 @@ git-fetch-with-cli = true # A list of advisory IDs to ignore. Note that ignored advisories will still # output a note when they are encountered. -# RUSTSEC-2023-0052 and RUSTSEC-2023-0053 are pending a webpki update that is tracked by https://github.com/apollographql/router/issues/3645 -# and will be fixed by https://github.com/apollographql/router/pull/3643 -ignore = ["RUSTSEC-2023-0053", "RUSTSEC-2023-0052"] +# rustsec advisory exemptions +ignore = [] # This section is considered when running `cargo deny check licenses` # More documentation for the licenses section can be found here: diff --git a/xtask/Cargo.lock b/xtask/Cargo.lock index bdf077abae..a94d3cae59 100644 --- a/xtask/Cargo.lock +++ b/xtask/Cargo.lock @@ -1196,9 +1196,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.101.3" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "261e9e0888cba427c3316e6322805653c9425240b6fd96cee7cb671ab70ab8d0" +checksum = "7d93931baf2d282fff8d3a532bbfd7653f734643161b87e3e01e59a04439bf0d" dependencies = [ "ring", "untrusted", @@ -1732,9 +1732,9 @@ dependencies = [ [[package]] name = "webpki" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +checksum = "f0e74f82d49d545ad128049b7e88f6576df2da6b02e9ce565c6f533be576957e" dependencies = [ "ring", "untrusted", From cf122ed1d417bf49b6b56a16a4da31e4587f534d Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 14:41:47 +0200 Subject: [PATCH 49/81] fix(deps): update rust crate router-bridge to v0.5.5+v2.5.4 (#3717) This PR contains the following updates: | Package | Type | Update | Change | |---|---|---|---| | [router-bridge](https://www.apollographql.com/apollo-federation/) ([source](https://togithub.com/apollographql/federation)) | dependencies | patch | 
`=0.5.4+v2.5.3` -> `=0.5.5+v2.5.4` |

---

Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
---
 Cargo.lock               | 4 ++--
 apollo-router/Cargo.toml | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index cbcbc5699d..9032b007f3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5153,9 +5153,9 @@ dependencies = [
 
 [[package]]
 name = "router-bridge"
-version = "0.5.4+v2.5.3"
+version = "0.5.5+v2.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "82d3e1bfc37d92eab53edcd17d4290b5aa8fb95ab43d0408f5d9b56157a6d61c"
+checksum = "a33cdf930b79165fd6a0d3b94ccd930162103255db0dd1a7dd6625568b347539"
 dependencies = [
  "anyhow",
  "async-channel",
diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml
index ab458424d5..33921ff1c6 100644
--- a/apollo-router/Cargo.toml
+++ b/apollo-router/Cargo.toml
@@ -172,7 +172,7 @@ reqwest = { version = "0.11.19", default-features = false, features = [
     "stream",
 ] }
 # note: this dependency should _always_ be pinned, prefix the version with an `=`
-router-bridge = "=0.5.4+v2.5.3"
+router-bridge = "=0.5.5+v2.5.4"
 rust-embed = "6.8.1"
 rustls = "0.21.6"
 rustls-pemfile = "1.0.3"

From f5ff6b167bb5564f05a41ef756d9d67160ec47cf Mon Sep 17 00:00:00 2001
From: Geoffroy Couprie
Date: Tue, 5 Sep 2023 14:49:18 +0200
Subject: [PATCH 50/81] GraphOS authorization: add an example of scope
 manipulation with router service level rhai (#3719)

Co-authored-by: Maria Elisabeth Schreiber
Co-authored-by: Edward Huang <18322228+shorgi@users.noreply.github.com>
---
 .../docs_geal_authorization_router_rhai.md  |  5 +++
 docs/source/configuration/authorization.mdx | 33 +++++++++++++++++++
 2 files changed, 38 insertions(+)
 create mode 100644 .changesets/docs_geal_authorization_router_rhai.md

diff --git a/.changesets/docs_geal_authorization_router_rhai.md b/.changesets/docs_geal_authorization_router_rhai.md
new file mode 100644
index 0000000000..0e3596252b
--- /dev/null
+++ b/.changesets/docs_geal_authorization_router_rhai.md
@@ -0,0 +1,5 @@
+### GraphOS authorization: add an example of scope manipulation with router service level rhai ([PR #3719](https://github.com/apollographql/router/pull/3719))
+
+The router authorization directive `@requiresScopes` expects scopes to come from the `scope` claim in the OAuth2 access token format ( https://datatracker.ietf.org/doc/html/rfc6749#section-3.3 ). Some tokens may have scopes stored in a different way, like an array of strings, or even in different claims. This documents a way to extract the scopes and prepare them in the right format for consumption by `@requiresScopes`, using Rhai.
+
+By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3719
\ No newline at end of file
diff --git a/docs/source/configuration/authorization.mdx b/docs/source/configuration/authorization.mdx
index 1b6d1e47f8..1a21b54a1c 100644
--- a/docs/source/configuration/authorization.mdx
+++ b/docs/source/configuration/authorization.mdx
@@ -140,6 +140,39 @@ claims = context["apollo_authentication::JWT::claims"]
 claims["scope"] = "scope1 scope2 scope3"
 ```
 
+
+
+
+If the `apollo_authentication::JWT::claims` object holds scopes in another format, for example, an array of strings, or at a key other than `"scope"`, you can edit the claims with a [Rhai script](../customizations/rhai).
+
+The example below extracts an array of scopes from the `"roles"` claim and reformats them as a space-separated string.
+ +```Rhai +fn router_service(service) { + let request_callback = |request| { + let claims = request.context["apollo_authentication::JWT::claims"]; + let roles = claims["roles"]; + + let scope = ""; + if roles.len() > 1 { + scope = roles[0]; + } + + if roles.len() > 2 { + for role in roles[1..] { + scope += ' '; + scope += role; + } + } + + claims["scope"] = scope; + request.context["apollo_authentication::JWT::claims"] = claims; + }; + service.map_request(request_callback); +} +``` + + + #### Usage To use the `@requiresScopes` directive in a subgraph, you can [import it from the `@link` directive](/federation/federated-types/federated-directives/#importing-directives) like so: From 5ed030ce6b8f9e3cb4b527461b6ba8417621df96 Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Tue, 5 Sep 2023 15:56:49 +0200 Subject: [PATCH 51/81] fix: handle correctly multipart stream if the original stream is empty (#3748) Fixes #3293 --------- Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> Co-authored-by: Jeremy Lempereur --- .../fix_bnjjj_fix_empty_multipart_stream.md | 5 ++ apollo-router/src/protocols/multipart.rs | 74 +++++++++++++++++-- 2 files changed, 72 insertions(+), 7 deletions(-) create mode 100644 .changesets/fix_bnjjj_fix_empty_multipart_stream.md diff --git a/.changesets/fix_bnjjj_fix_empty_multipart_stream.md b/.changesets/fix_bnjjj_fix_empty_multipart_stream.md new file mode 100644 index 0000000000..68aa0de28a --- /dev/null +++ b/.changesets/fix_bnjjj_fix_empty_multipart_stream.md @@ -0,0 +1,5 @@ +### Handle multipart stream if the original stream is empty ([Issue #3293](https://github.com/apollographql/router/issues/3293)) + +For subscription and defer, in case the multipart response stream is empty then it should end correctly. 
+ +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/3748 \ No newline at end of file diff --git a/apollo-router/src/protocols/multipart.rs b/apollo-router/src/protocols/multipart.rs index b617c5ff50..59cb5f3212 100644 --- a/apollo-router/src/protocols/multipart.rs +++ b/apollo-router/src/protocols/multipart.rs @@ -8,6 +8,7 @@ use futures::stream::StreamExt; use futures::Stream; use serde::Serialize; use serde_json_bytes::Value; +use tokio_stream::once; use tokio_stream::wrappers::IntervalStream; use crate::graphql; @@ -36,8 +37,14 @@ struct SubscriptionPayload { errors: Vec, } +enum MessageKind { + Heartbeat, + Message(graphql::Response), + Eof, +} + pub(crate) struct Multipart { - stream: Pin> + Send>>, + stream: Pin + Send>>, is_first_chunk: bool, is_terminated: bool, mode: ProtocolMode, @@ -50,11 +57,14 @@ impl Multipart { { let stream = match mode { ProtocolMode::Subscription => select( - stream.map(Some), - IntervalStream::new(tokio::time::interval(HEARTBEAT_INTERVAL)).map(|_| None), + stream + .map(MessageKind::Message) + .chain(once(MessageKind::Eof)), + IntervalStream::new(tokio::time::interval(HEARTBEAT_INTERVAL)) + .map(|_| MessageKind::Heartbeat), ) .boxed(), - ProtocolMode::Defer => stream.map(Some).boxed(), + ProtocolMode::Defer => stream.map(MessageKind::Message).boxed(), }; Self { @@ -78,7 +88,7 @@ impl Stream for Multipart { } match self.stream.as_mut().poll_next(cx) { Poll::Ready(message) => match message { - Some(None) => { + Some(MessageKind::Heartbeat) => { // It's the ticker for heartbeat for subscription let buf = if self.is_first_chunk { self.is_first_chunk = false; @@ -93,7 +103,7 @@ impl Stream for Multipart { Poll::Ready(Some(Ok(buf))) } - Some(Some(mut response)) => { + Some(MessageKind::Message(mut response)) => { let mut buf = if self.is_first_chunk { self.is_first_chunk = false; Vec::from(&b"\r\n--graphql\r\ncontent-type: application/json\r\n\r\n"[..]) @@ -132,7 +142,26 @@ impl Stream for Multipart { Poll::Ready(Some(Ok(buf.into()))) } - None => Poll::Ready(None), + Some(MessageKind::Eof) => { + // If the stream ends or is empty + let buf = if self.is_first_chunk { + self.is_first_chunk = false; + Bytes::from_static( + &b"\r\n--graphql\r\ncontent-type: application/json\r\n\r\n{}\r\n--graphql--\r\n"[..] 
+ ) + } else { + Bytes::from_static( + &b"content-type: application/json\r\n\r\n{}\r\n--graphql--\r\n"[..], + ) + }; + self.is_terminated = true; + + Poll::Ready(Some(Ok(buf))) + } + None => { + self.is_terminated = true; + Poll::Ready(None) + } }, Poll::Pending => Poll::Pending, } @@ -205,4 +234,35 @@ mod tests { } } } + + #[tokio::test] + async fn test_empty_stream() { + let responses = vec![]; + let gql_responses = stream::iter(responses); + + let mut protocol = Multipart::new(gql_responses, ProtocolMode::Subscription); + let heartbeat = String::from( + "\r\n--graphql\r\ncontent-type: application/json\r\n\r\n{}\r\n--graphql\r\n", + ); + let mut curr_index = 0; + while let Some(resp) = protocol.next().await { + let res = dbg!(String::from_utf8(resp.unwrap().to_vec()).unwrap()); + if res == heartbeat { + continue; + } else { + match curr_index { + 0 => { + assert_eq!( + res, + "\r\n--graphql\r\ncontent-type: application/json\r\n\r\n{}\r\n--graphql--\r\n" + ); + } + _ => { + panic!("should not happen, test failed"); + } + } + curr_index += 1; + } + } + } } From ce964d346a27310df460ef27f201356a92d419f7 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 5 Sep 2023 15:57:13 +0200 Subject: [PATCH 52/81] fix authenticated directive reporting (#3753) This is not a security issue, it only affects usage reporting for the `@authenticated` directive --- .changesets/fix_geal_fix_authenticated_reporting.md | 5 +++++ apollo-router/src/plugins/authorization/mod.rs | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 .changesets/fix_geal_fix_authenticated_reporting.md diff --git a/.changesets/fix_geal_fix_authenticated_reporting.md b/.changesets/fix_geal_fix_authenticated_reporting.md new file mode 100644 index 0000000000..ff189e1d7f --- /dev/null +++ b/.changesets/fix_geal_fix_authenticated_reporting.md @@ -0,0 +1,5 @@ +### Fix authenticated directive reporting ([PR #3753](https://github.com/apollographql/router/pull/3753)) + +The context key for the `@authenticated` directive only affects usage reporting + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/3753 \ No newline at end of file diff --git a/apollo-router/src/plugins/authorization/mod.rs b/apollo-router/src/plugins/authorization/mod.rs index 20bfe91db4..babcc91c1b 100644 --- a/apollo-router/src/plugins/authorization/mod.rs +++ b/apollo-router/src/plugins/authorization/mod.rs @@ -141,7 +141,7 @@ impl AuthorizationPlugin { // if this fails, the query is invalid and will fail at the query planning phase. 
// We do not return validation errors here for now because that would imply a huge // refactoring of telemetry and tests - if traverse::document(&mut visitor, file_id).is_ok() && !visitor.found { + if traverse::document(&mut visitor, file_id).is_ok() && visitor.found { context.insert(AUTHENTICATED_KEY, true).unwrap(); } From eb58fd488c6ae0a037700ab7e1fc6c4b0739c836 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 5 Sep 2023 14:25:19 +0000 Subject: [PATCH 53/81] fix(deps): update rust crate walkdir to 2.4.0 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- xtask/Cargo.lock | 4 ++-- xtask/Cargo.toml | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9032b007f3..44bc38cd31 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7080,9 +7080,9 @@ checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "walkdir" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" dependencies = [ "same-file", "winapi-util", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 33921ff1c6..7e8d6fed3e 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -297,7 +297,7 @@ tracing-subscriber = { version = "0.3", default-features = false, features = [ "fmt", ] } tracing-test = "0.2.4" -walkdir = "2.3.3" +walkdir = "2.4.0" wiremock = "0.5.19" [target.'cfg(target_os = "linux")'.dev-dependencies] diff --git a/xtask/Cargo.lock b/xtask/Cargo.lock index a94d3cae59..6bd8d563b2 100644 --- a/xtask/Cargo.lock +++ b/xtask/Cargo.lock @@ -1625,9 +1625,9 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "walkdir" -version = "2.3.3" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" dependencies = [ "same-file", "winapi-util", diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index b19d08afac..90412259fe 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -39,7 +39,7 @@ tempfile = "3" tinytemplate = "1.2.1" tokio = "1.32.0" which = "4" -walkdir = "2.3.3" +walkdir = "2.4.0" [target.'cfg(target_os = "macos")'.dependencies] base64 = "0.21" From a4adccfca7039f746dff826f073984c8ddfbacdd Mon Sep 17 00:00:00 2001 From: Nicolas Moutschen Date: Wed, 6 Sep 2023 10:56:16 +0200 Subject: [PATCH 54/81] feat(apollo-router): add support for GraphOS Cloud metrics (#3761) Fixes #3760 --- .changesets/maint_nicolas_otlp_filter.md | 5 +++++ apollo-router/src/plugins/telemetry/metrics/filter.rs | 9 ++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 .changesets/maint_nicolas_otlp_filter.md diff --git a/.changesets/maint_nicolas_otlp_filter.md b/.changesets/maint_nicolas_otlp_filter.md new file mode 100644 index 0000000000..7bb2f7070c --- /dev/null +++ b/.changesets/maint_nicolas_otlp_filter.md @@ -0,0 +1,5 @@ +### Add support GraphOS Cloud metrics ([Issue #3760](https://github.com/apollographql/router/issues/3760)) + +Add support for GraphOS Cloud metrics in the Apollo OTLP Exporter. 
+ +By [@nmoutschen](https://github.com/nmoutschen) in https://github.com/apollographql/router/pull/3761 \ No newline at end of file diff --git a/apollo-router/src/plugins/telemetry/metrics/filter.rs b/apollo-router/src/plugins/telemetry/metrics/filter.rs index 549da8eeed..919a073cd7 100644 --- a/apollo-router/src/plugins/telemetry/metrics/filter.rs +++ b/apollo-router/src/plugins/telemetry/metrics/filter.rs @@ -37,7 +37,7 @@ impl FilterMeterProvider { FilterMeterProvider::builder() .delegate(delegate) .allow( - Regex::new(r"apollo\.router\.(operations?|config)(\..*|$)") + Regex::new(r"apollo\.(graphos\.cloud|router\.(operations?|config))(\..*|$)") .expect("regex should have been valid"), ) .build() @@ -212,6 +212,7 @@ mod test { .versioned_meter("filtered", None, None); filtered.u64_counter("apollo.router.operations").init(); filtered.u64_counter("apollo.router.operations.test").init(); + filtered.u64_counter("apollo.graphos.cloud.test").init(); filtered.u64_counter("apollo.router.unknown.test").init(); assert!(delegate .instrument_provider @@ -225,6 +226,12 @@ mod test { .lock() .unwrap() .contains(&("apollo.router.operations".to_string(), None, None))); + assert!(delegate + .instrument_provider + .counters_created + .lock() + .unwrap() + .contains(&("apollo.graphos.cloud.test".to_string(), None, None))); assert!(!delegate .instrument_provider .counters_created From dbc892597da11398c84821bf763a31438082367a Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Fri, 1 Sep 2023 10:41:21 +0200 Subject: [PATCH 55/81] Deal with interfaces on fragment spreads when no __typename is queried Fix #2587 Operations would over rely on the presence of __typename to resolve selection sets on interface implementers. This changeset checks for the parent type in an InlineFragment, so we don't drop relevant selection set when applicable. 
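Reduced to a hypothetical stand-alone sketch (the `fragment_applies` function and its parameters are invented for illustration and are not the formatter's real API), the decision becomes: trust `__typename` when it is present in the data, and otherwise fall back to the type known statically from the query shape instead of discarding the fragment's selection set.

```rust
// Simplified model of the check described above, assuming `is_subtype(parent, child)`
// answers whether `child` implements or is a member of `parent`.
fn fragment_applies(
    type_condition: &str,
    typename_in_data: Option<&str>,
    known_type: &str,
    is_subtype: &dyn Fn(&str, &str) -> bool,
) -> bool {
    match typename_in_data {
        // `__typename` was queried: use it directly.
        Some(t) => t == type_condition || is_subtype(type_condition, t),
        // No `__typename`: rely on the type we already know from the query shape.
        None => known_type == type_condition || is_subtype(type_condition, known_type),
    }
}

fn main() {
    // `Dog implements Animal`, as in the test added by this change.
    let is_subtype = |parent: &str, child: &str| parent == "Animal" && child == "Dog";
    // `... on Animal { ... on Dog { name } }` with no `__typename` in the data:
    assert!(fragment_applies("Animal", None, "Dog", &is_subtype));
    assert!(fragment_applies("Dog", None, "Dog", &is_subtype));
}
```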
--- .../src/services/supergraph_service.rs | 131 ++++++++++++++++++ apollo-router/src/spec/query.rs | 23 +-- apollo-router/src/spec/query/tests.rs | 42 +++++- 3 files changed, 182 insertions(+), 14 deletions(-) diff --git a/apollo-router/src/services/supergraph_service.rs b/apollo-router/src/services/supergraph_service.rs index 7471a059b9..d7bad236b5 100644 --- a/apollo-router/src/services/supergraph_service.rs +++ b/apollo-router/src/services/supergraph_service.rs @@ -2924,4 +2924,135 @@ mod tests { insta::assert_json_snapshot!(stream.next_response().await.unwrap()); } + + #[tokio::test] + async fn no_typename_on_interface() { + let subgraphs = MockedSubgraphs([ + ("animal", MockSubgraph::builder().with_json( + serde_json::json!{{"query":"query dog__animal__0{dog{id name}}", "operationName": "dog__animal__0"}}, + serde_json::json!{{"data":{"dog":{"id":"4321","name":"Spot"}}}} + ).with_json( + serde_json::json!{{"query":"query dog__animal__0{dog{__typename id name}}", "operationName": "dog__animal__0"}}, + serde_json::json!{{"data":{"dog":{"__typename":"Dog","id":"4321","name":"Spot"}}}} + ).build()), + ].into_iter().collect()); + + let service = TestHarness::builder() + .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } })) + .unwrap() + .schema( + r#"schema + @core(feature: "https://specs.apollo.dev/core/v0.2"), + @core(feature: "https://specs.apollo.dev/join/v0.1", for: EXECUTION) + { + query: Query + } + directive @core(as: String, feature: String!, for: core__Purpose) repeatable on SCHEMA + directive @join__field(graph: join__Graph, provides: join__FieldSet, requires: join__FieldSet) on FIELD_DEFINITION + directive @join__graph(name: String!, url: String!) on ENUM_VALUE + directive @join__owner(graph: join__Graph!) on INTERFACE | OBJECT + directive @join__type(graph: join__Graph!, key: join__FieldSet) repeatable on INTERFACE | OBJECT + + interface Animal { + id: String! + } + + type Dog implements Animal { + id: String! + name: String! + } + + type Query { + animal: Animal! @join__field(graph: ANIMAL) + dog: Dog! @join__field(graph: ANIMAL) + } + + enum core__Purpose { + """ + `EXECUTION` features provide metadata necessary to for operation execution. + """ + EXECUTION + + """ + `SECURITY` features provide metadata necessary to securely resolve fields. 
+ """ + SECURITY + } + + scalar join__FieldSet + + enum join__Graph { + ANIMAL @join__graph(name: "animal" url: "http://localhost:8080/query") + } + "#, + ) + .extra_plugin(subgraphs) + .build_supergraph() + .await + .unwrap(); + + let request = supergraph::Request::fake_builder() + .context(defer_context()) + .query( + "query dog { + dog { + ...on Animal { + id + ...on Dog { + name + } + } + } + }", + ) + .build() + .unwrap(); + + let mut stream = service.clone().oneshot(request).await.unwrap(); + + let no_typename = stream.next_response().await.unwrap(); + + let request = supergraph::Request::fake_builder() + .context(defer_context()) + .query( + "query dog { + dog { + ...on Animal { + id + __typename + ...on Dog { + name + } + } + } + }", + ) + .build() + .unwrap(); + + let mut stream = service.oneshot(request).await.unwrap(); + + let with_typename = stream.next_response().await.unwrap(); + assert_eq!( + with_typename + .data + .clone() + .unwrap() + .get("dog") + .unwrap() + .get("name") + .unwrap(), + no_typename + .data + .clone() + .unwrap() + .get("dog") + .unwrap() + .get("name") + .unwrap(), + "{:?}\n{:?}", + with_typename, + no_typename + ); + } } diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs index 10b9775310..8d44c55175 100644 --- a/apollo-router/src/spec/query.rs +++ b/apollo-router/src/spec/query.rs @@ -680,21 +680,18 @@ impl Query { let is_apply = if let Some(input_type) = input.get(TYPENAME).and_then(|val| val.as_str()) { - // check if the fragment matches the input type directly, and if not, check if the + // Only check if the fragment matches the input type directly, and if not, check if the // input type is a subtype of the fragment's type condition (interface, union) input_type == type_condition.as_str() || parameters.schema.is_subtype(type_condition, input_type) } else { - // known_type = true means that from the query's shape, we know - // we should get the right type here. But in the case we get a - // __typename field and it does not match, we should not apply - // that fragment - // If the type condition is an interface and the current known type implements it known_type - .as_ref() - .map(|k| parameters.schema.is_subtype(type_condition, k)) + .as_ref() + // We have no typename, we apply the selection set if the known_type implements the type_condition + .map(|k| is_subtype_or_same(parameters, type_condition, k)) .unwrap_or_default() - || known_type.as_deref() == Some(type_condition.as_str()) + // Or if the known_type implements the parent's type_condition because we're in an inline fragment. + || is_subtype_or_same(parameters, &parent_type.name(), type_condition) }; if is_apply { @@ -1072,6 +1069,14 @@ impl Query { } } +fn is_subtype_or_same( + parameters: &FormatParameters<'_>, + parent: &String, + maybe_child: &String, +) -> bool { + parent == maybe_child || parameters.schema.is_subtype(parent, maybe_child) +} + /// Intermediate structure for arguments passed through the entire formatting struct FormatParameters<'a> { variables: &'a Object, diff --git a/apollo-router/src/spec/query/tests.rs b/apollo-router/src/spec/query/tests.rs index 6da93dc0d4..32c08139ab 100644 --- a/apollo-router/src/spec/query/tests.rs +++ b/apollo-router/src/spec/query/tests.rs @@ -18,6 +18,26 @@ macro_rules! assert_eq_and_ordered { }; } +macro_rules! assert_eq_and_ordered_json { + ($a:expr, $b:expr $(,)?) 
=> { + assert_eq!( + $a, + $b, + "assertion failed: objects are not the same:\ + \n left: `{}`\n right: `{}`", + serde_json::to_string(&$a).unwrap(), + serde_json::to_string(&$b).unwrap() + ); + assert!( + $a.eq_and_ordered(&$b), + "assertion failed: objects are not ordered the same:\ + \n left: `{}`\n right: `{}`", + serde_json::to_string(&$a).unwrap(), + serde_json::to_string(&$b).unwrap(), + ); + }; +} + #[derive(Default)] struct FormatTest { schema: Option<&'static str>, @@ -122,15 +142,21 @@ impl FormatTest { ); if let Some(e) = self.expected { - assert_eq_and_ordered!(response.data.as_ref().unwrap(), &e); + assert_eq_and_ordered_json!( + serde_json_bytes::to_value(response.data.as_ref()).unwrap(), + e + ); } if let Some(e) = self.expected_errors { - assert_eq_and_ordered!(serde_json_bytes::to_value(&response.errors).unwrap(), e); + assert_eq_and_ordered_json!(serde_json_bytes::to_value(&response.errors).unwrap(), e); } if let Some(e) = self.expected_extensions { - assert_eq_and_ordered!(serde_json_bytes::to_value(&response.extensions).unwrap(), e); + assert_eq_and_ordered_json!( + serde_json_bytes::to_value(&response.extensions).unwrap(), + e + ); } } } @@ -496,9 +522,15 @@ fn reformat_response_data_best_effort() { "baz": "2", }, "array": [ - {}, + { + "bar":null, + "baz":"3" + }, null, - {}, + { + "bar":"5", + "baz":null + } ], "other": null, }, From f719ee5485aef7452163a0835ca73f4071b0305e Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Fri, 1 Sep 2023 10:48:11 +0200 Subject: [PATCH 56/81] changeset --- .changesets/fix_igni_typename_fragment_interfaces.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changesets/fix_igni_typename_fragment_interfaces.md diff --git a/.changesets/fix_igni_typename_fragment_interfaces.md b/.changesets/fix_igni_typename_fragment_interfaces.md new file mode 100644 index 0000000000..634bb740b0 --- /dev/null +++ b/.changesets/fix_igni_typename_fragment_interfaces.md @@ -0,0 +1,5 @@ +### Deal with interfaces on fragment spreads when no __typename is queried ([Issue #2587](https://github.com/apollographql/router/issues/2587)) + +Operations would over rely on the presence of __typename to resolve selection sets on interface implementers. This changeset checks for the parent type in an InlineFragment, so we don't drop relevant selection set when applicable. 
+ +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3718 From 5494b3619f7fb5c2738ae5162227ec8892ab9d18 Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Fri, 1 Sep 2023 14:31:49 +0200 Subject: [PATCH 57/81] move the logic to selection set generation --- .../src/services/supergraph_service.rs | 47 ++++++++++++++++++- apollo-router/src/spec/query.rs | 8 ++-- apollo-router/src/spec/schema.rs | 7 +++ apollo-router/src/spec/selection.rs | 11 ++++- 4 files changed, 65 insertions(+), 8 deletions(-) diff --git a/apollo-router/src/services/supergraph_service.rs b/apollo-router/src/services/supergraph_service.rs index d7bad236b5..4f13c364e8 100644 --- a/apollo-router/src/services/supergraph_service.rs +++ b/apollo-router/src/services/supergraph_service.rs @@ -2934,6 +2934,9 @@ mod tests { ).with_json( serde_json::json!{{"query":"query dog__animal__0{dog{__typename id name}}", "operationName": "dog__animal__0"}}, serde_json::json!{{"data":{"dog":{"__typename":"Dog","id":"4321","name":"Spot"}}}} + ).with_json( + serde_json::json!{{"query":"query dog__animal__0{dog{name id}}", "operationName": "dog__animal__0"}}, + serde_json::json!{{"data":{"dog":{"id":"4321","name":"Spot"}}}} ).build()), ].into_iter().collect()); @@ -3030,7 +3033,7 @@ mod tests { .build() .unwrap(); - let mut stream = service.oneshot(request).await.unwrap(); + let mut stream = service.clone().oneshot(request).await.unwrap(); let with_typename = stream.next_response().await.unwrap(); assert_eq!( @@ -3054,5 +3057,47 @@ mod tests { with_typename, no_typename ); + + let request = supergraph::Request::fake_builder() + .context(defer_context()) + .query( + "query dog { + dog { + ...on Dog { + name + ...on Animal { + id + } + } + } + }", + ) + .build() + .unwrap(); + + let mut stream = service.oneshot(request).await.unwrap(); + + let with_reversed_fragments = stream.next_response().await.unwrap(); + assert_eq!( + with_reversed_fragments + .data + .clone() + .unwrap() + .get("dog") + .unwrap() + .get("name") + .unwrap(), + no_typename + .data + .clone() + .unwrap() + .get("dog") + .unwrap() + .get("name") + .unwrap(), + "{:?}\n{:?}", + with_reversed_fragments, + no_typename + ); } } diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs index 8d44c55175..1a588c2a78 100644 --- a/apollo-router/src/spec/query.rs +++ b/apollo-router/src/spec/query.rs @@ -686,12 +686,10 @@ impl Query { || parameters.schema.is_subtype(type_condition, input_type) } else { known_type - .as_ref() - // We have no typename, we apply the selection set if the known_type implements the type_condition - .map(|k| is_subtype_or_same(parameters, type_condition, k)) + .as_ref() + // We have no typename, we apply the selection set if the known_type implements the type_condition + .map(|k| is_subtype_or_same(parameters, type_condition, k)) .unwrap_or_default() - // Or if the known_type implements the parent's type_condition because we're in an inline fragment. 
- || is_subtype_or_same(parameters, &parent_type.name(), type_condition) }; if is_apply { diff --git a/apollo-router/src/spec/schema.rs b/apollo-router/src/spec/schema.rs index b34b180f37..892883339b 100644 --- a/apollo-router/src/spec/schema.rs +++ b/apollo-router/src/spec/schema.rs @@ -160,6 +160,13 @@ impl Schema { .unwrap_or(false) } + pub(crate) fn is_interface(&self, abstract_type: &str) -> bool { + self.type_system + .definitions + .interfaces + .contains_key(abstract_type) + } + /// Return an iterator over subgraphs that yields the subgraph name and its URL. pub(crate) fn subgraphs(&self) -> impl Iterator { self.subgraphs.iter() diff --git a/apollo-router/src/spec/selection.rs b/apollo-router/src/spec/selection.rs index 34728d419b..55ae883342 100644 --- a/apollo-router/src/spec/selection.rs +++ b/apollo-router/src/spec/selection.rs @@ -152,17 +152,24 @@ impl Selection { let fragment_type = FieldType::new_named(type_condition.clone()); + let relevant_type = if schema.is_interface(type_condition.as_str()) { + current_type + } else { + &fragment_type + }; + + let known_type = relevant_type.inner_type_name().map(|s| s.to_string()); + let selection_set = inline_fragment .selection_set() .selection() .iter() .filter_map(|selection| { - Selection::from_hir(selection, &fragment_type, schema, count, defer_stats) + Selection::from_hir(selection, relevant_type, schema, count, defer_stats) .transpose() }) .collect::>()?; - let known_type = current_type.inner_type_name().map(|s| s.to_string()); Some(Self::InlineFragment { type_condition, selection_set, From 7af9125e87dbce99401c52092db220be885bfbce Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Fri, 1 Sep 2023 14:36:01 +0200 Subject: [PATCH 58/81] revert unused refacto --- apollo-router/src/spec/query.rs | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs index 1a588c2a78..6f6b5e02d3 100644 --- a/apollo-router/src/spec/query.rs +++ b/apollo-router/src/spec/query.rs @@ -688,8 +688,9 @@ impl Query { known_type .as_ref() // We have no typename, we apply the selection set if the known_type implements the type_condition - .map(|k| is_subtype_or_same(parameters, type_condition, k)) + .map(|k| parameters.schema.is_subtype(type_condition, k)) .unwrap_or_default() + || known_type.as_deref() == Some(type_condition.as_str()) }; if is_apply { @@ -1067,14 +1068,6 @@ impl Query { } } -fn is_subtype_or_same( - parameters: &FormatParameters<'_>, - parent: &String, - maybe_child: &String, -) -> bool { - parent == maybe_child || parameters.schema.is_subtype(parent, maybe_child) -} - /// Intermediate structure for arguments passed through the entire formatting struct FormatParameters<'a> { variables: &'a Object, From 642f8d1fdb2b78cb2fd15c4abb7cacd67b3f5947 Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Fri, 1 Sep 2023 15:08:30 +0200 Subject: [PATCH 59/81] add one more test on best_effort with a more relevant output --- apollo-router/src/spec/query/tests.rs | 79 +++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) diff --git a/apollo-router/src/spec/query/tests.rs b/apollo-router/src/spec/query/tests.rs index 32c08139ab..61c6b35cef 100644 --- a/apollo-router/src/spec/query/tests.rs +++ b/apollo-router/src/spec/query/tests.rs @@ -539,6 +539,85 @@ fn reformat_response_data_best_effort() { .test(); } +#[test] +// just like the test above, except the query is one the planner would generate. 
+fn reformat_response_data_best_effort_relevant_query() { + FormatTest::builder() + .schema( + "type Query { + get: Thing + } + type Thing { + foo: String + stuff: Baz + array: [Element] + other: Bar + } + + type Baz { + bar: String + baz: String + } + + type Bar { + bar: String + } + + union Element = Baz | Bar + ", + ) + .query("{get{foo stuff{bar baz}array{...on Baz{bar baz}}other{bar}}}") + // the planner generates this: + // {get{foo stuff{bar baz}array{__typename ...on Baz{bar baz}}other{bar}}} + .response(json! { + { + "get": { + "foo": "1", + "stuff": {"baz": "2"}, + "array": [ + { + "__typename": "Baz", + "baz": "3" + }, + "4", + { + "__typename": "Baz", + "baz": "5" + }, + ], + "other": "6", + }, + "should_be_removed": { + "aaa": 2 + }, + } + }) + .expected(json! { + { + "get": { + "foo": "1", + "stuff": { + "bar": null, + "baz": "2", + }, + "array": [ + { + "bar":null, + "baz":"3" + }, + null, + { + "bar": null, + "baz":"5" + } + ], + "other": null, + }, + } + }) + .test(); +} + #[test] fn reformat_response_array_of_scalar_simple() { FormatTest::builder() From 0e2bceaf82cdc9e6ae5e6116cbcc78908b2ebd95 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 5 Sep 2023 11:49:20 +0200 Subject: [PATCH 60/81] use snapshots in the tests and different ids to recognize responses --- ..._service__tests__no_typename_on_interface-2.snap | 13 +++++++++++++ ..._service__tests__no_typename_on_interface-3.snap | 12 ++++++++++++ ...ph_service__tests__no_typename_on_interface.snap | 12 ++++++++++++ apollo-router/src/services/supergraph_service.rs | 7 +++++-- 4 files changed, 42 insertions(+), 2 deletions(-) create mode 100644 apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-2.snap create mode 100644 apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-3.snap create mode 100644 apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface.snap diff --git a/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-2.snap b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-2.snap new file mode 100644 index 0000000000..00772267f1 --- /dev/null +++ b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-2.snap @@ -0,0 +1,13 @@ +--- +source: apollo-router/src/services/supergraph_service.rs +expression: with_typename +--- +{ + "data": { + "dog": { + "id": "8765", + "__typename": "Dog", + "name": "Spot" + } + } +} diff --git a/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-3.snap b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-3.snap new file mode 100644 index 0000000000..f385b2bdd9 --- /dev/null +++ b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface-3.snap @@ -0,0 +1,12 @@ +--- +source: apollo-router/src/services/supergraph_service.rs +expression: with_reversed_fragments +--- +{ + "data": { + "dog": { + "name": "Spot", + "id": "0000" + } + } +} diff --git a/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface.snap 
b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface.snap new file mode 100644 index 0000000000..2a443b1b15 --- /dev/null +++ b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__no_typename_on_interface.snap @@ -0,0 +1,12 @@ +--- +source: apollo-router/src/services/supergraph_service.rs +expression: no_typename +--- +{ + "data": { + "dog": { + "id": "4321", + "name": "Spot" + } + } +} diff --git a/apollo-router/src/services/supergraph_service.rs b/apollo-router/src/services/supergraph_service.rs index 4f13c364e8..9f14a693a0 100644 --- a/apollo-router/src/services/supergraph_service.rs +++ b/apollo-router/src/services/supergraph_service.rs @@ -2933,10 +2933,10 @@ mod tests { serde_json::json!{{"data":{"dog":{"id":"4321","name":"Spot"}}}} ).with_json( serde_json::json!{{"query":"query dog__animal__0{dog{__typename id name}}", "operationName": "dog__animal__0"}}, - serde_json::json!{{"data":{"dog":{"__typename":"Dog","id":"4321","name":"Spot"}}}} + serde_json::json!{{"data":{"dog":{"__typename":"Dog","id":"8765","name":"Spot"}}}} ).with_json( serde_json::json!{{"query":"query dog__animal__0{dog{name id}}", "operationName": "dog__animal__0"}}, - serde_json::json!{{"data":{"dog":{"id":"4321","name":"Spot"}}}} + serde_json::json!{{"data":{"dog":{"id":"0000","name":"Spot"}}}} ).build()), ].into_iter().collect()); @@ -3014,6 +3014,7 @@ mod tests { let mut stream = service.clone().oneshot(request).await.unwrap(); let no_typename = stream.next_response().await.unwrap(); + insta::assert_json_snapshot!(no_typename); let request = supergraph::Request::fake_builder() .context(defer_context()) @@ -3057,6 +3058,7 @@ mod tests { with_typename, no_typename ); + insta::assert_json_snapshot!(with_typename); let request = supergraph::Request::fake_builder() .context(defer_context()) @@ -3099,5 +3101,6 @@ mod tests { with_reversed_fragments, no_typename ); + insta::assert_json_snapshot!(with_reversed_fragments); } } From 66edc80fe29941585f38f7ad305864b41bd911bd Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Tue, 5 Sep 2023 11:51:26 +0200 Subject: [PATCH 61/81] keep using the current type as known type at the fragment application --- apollo-router/src/spec/query/tests.rs | 10 ++-------- apollo-router/src/spec/selection.rs | 18 ++++++++++++++++-- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/apollo-router/src/spec/query/tests.rs b/apollo-router/src/spec/query/tests.rs index 61c6b35cef..e3493baf6d 100644 --- a/apollo-router/src/spec/query/tests.rs +++ b/apollo-router/src/spec/query/tests.rs @@ -522,15 +522,9 @@ fn reformat_response_data_best_effort() { "baz": "2", }, "array": [ - { - "bar":null, - "baz":"3" - }, + {}, null, - { - "bar":"5", - "baz":null - } + {} ], "other": null, }, diff --git a/apollo-router/src/spec/selection.rs b/apollo-router/src/spec/selection.rs index 55ae883342..5252a7f5d9 100644 --- a/apollo-router/src/spec/selection.rs +++ b/apollo-router/src/spec/selection.rs @@ -151,15 +151,29 @@ impl Selection { .ok_or_else(|| SpecError::InvalidType(current_type.to_string()))?; let fragment_type = FieldType::new_named(type_condition.clone()); + let known_type = current_type.inner_type_name().map(|s| s.to_string()); + // this is the type we pass when extracting the fragment's selections + // If the type condition is a union or interface and the current type implements it, then we want + // to keep the current type when extracting the fragment's selections, as it is more precise 
+ // than the interface. + // If it is not, then we use the type condition let relevant_type = if schema.is_interface(type_condition.as_str()) { + // Query validation should have already verified that current type implements that interface + debug_assert!( + schema.is_subtype( + type_condition.as_str(), + current_type.inner_type_name().unwrap_or("") + ) || + // if the current type and the type condition are both the same interface, it is still valid + type_condition.as_str() + == current_type.inner_type_name().unwrap_or("") + ); current_type } else { &fragment_type }; - let known_type = relevant_type.inner_type_name().map(|s| s.to_string()); - let selection_set = inline_fragment .selection_set() .selection() From cb8d6181db0046cccd8b644fee9219c6cf149339 Mon Sep 17 00:00:00 2001 From: o0Ignition0o Date: Tue, 5 Sep 2023 12:59:10 +0200 Subject: [PATCH 62/81] add geoffroy to changeset --- .changesets/fix_igni_typename_fragment_interfaces.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changesets/fix_igni_typename_fragment_interfaces.md b/.changesets/fix_igni_typename_fragment_interfaces.md index 634bb740b0..5ed53a9ee9 100644 --- a/.changesets/fix_igni_typename_fragment_interfaces.md +++ b/.changesets/fix_igni_typename_fragment_interfaces.md @@ -2,4 +2,4 @@ Operations would over rely on the presence of __typename to resolve selection sets on interface implementers. This changeset checks for the parent type in an InlineFragment, so we don't drop relevant selection set when applicable. -By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3718 +By [@o0Ignition0o](https://github.com/o0Ignition0o) and [@geal](https://github.com/geal) in https://github.com/apollographql/router/pull/3718 From 911185cf4cd1453ace7923f00d78ec3f45b191f6 Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Wed, 6 Sep 2023 11:36:49 +0100 Subject: [PATCH 63/81] Fix metrics attribute types (#3724) Metrics attributes were being coerced to strings. This is now fixed. In addition, the logic around types accepted as metrics attributes has been simplified. It will log and ignore values of the wrong type. Fixes: #3687 **Checklist** Complete the checklist (and note appropriate exceptions) before a final PR is raised. - [ ] Changes are compatible[^1] - [ ] Documentation[^2] completed - [ ] Performance impact assessed and acceptable - Tests added and passing[^3] - [ ] Unit Tests - [ ] Integration Tests - [ ] Manual Tests **Exceptions** *Note any exceptions here* **Notes** [^1]. It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this. [^2]. Configuration is an important part of many changes. Where applicable please try to document configuration examples. [^3]. Tick whichever testing boxes are applicable. If you are adding Manual Tests: - please document the manual testing (extensively) in the Exceptions. 
- please raise a separate issue to automate the test and label it (or ask for it to be labeled) as `manual test` --------- Co-authored-by: bryn --- .changesets/fix_bryn_fix_metrics_typing.md | 6 + .../axum_factory/axum_http_server_factory.rs | 6 +- apollo-router/src/axum_factory/listeners.rs | 4 +- apollo-router/src/configuration/metrics.rs | 54 +++- ...etrics__test__metrics@apq.router.yaml.snap | 6 +- ...st__metrics@authorization.router.yaml.snap | 4 +- ...@authorization_directives.router.yaml.snap | 4 +- ...test__metrics@coprocessor.router.yaml.snap | 12 +- ...s__test__metrics@entities.router.yaml.snap | 18 +- ...ics__test__metrics@limits.router.yaml.snap | 16 +- ...metrics@persisted_queries.router.yaml.snap | 6 +- ...st__metrics@subscriptions.router.yaml.snap | 10 +- ...__test__metrics@telemetry.router.yaml.snap | 12 +- ...__metrics@traffic_shaping.router.yaml.snap | 16 +- .../src/plugins/telemetry/metrics/layer.rs | 251 +++++++++++++++--- apollo-router/src/plugins/telemetry/mod.rs | 4 +- .../plugins/traffic_shaping/timeout/future.rs | 2 +- .../src/query_planner/bridge_query_planner.rs | 12 +- apollo-router/src/uplink/mod.rs | 4 +- 19 files changed, 339 insertions(+), 108 deletions(-) create mode 100644 .changesets/fix_bryn_fix_metrics_typing.md diff --git a/.changesets/fix_bryn_fix_metrics_typing.md b/.changesets/fix_bryn_fix_metrics_typing.md new file mode 100644 index 0000000000..ab4d6eef03 --- /dev/null +++ b/.changesets/fix_bryn_fix_metrics_typing.md @@ -0,0 +1,6 @@ +### Fix metrics attribute types ([Issue #3687](https://github.com/apollographql/router/issues/3687)) + +Metrics attributes were being coerced to strings. This is now fixed. +In addition, the logic around types accepted as metrics attributes has been simplified. It will log and ignore values of the wrong type. 
+ +By [@BrynCooke](https://github.com/BrynCooke) in https://github.com/apollographql/router/pull/3724 diff --git a/apollo-router/src/axum_factory/axum_http_server_factory.rs b/apollo-router/src/axum_factory/axum_http_server_factory.rs index 1ef5fc452f..0b414f25b1 100644 --- a/apollo-router/src/axum_factory/axum_http_server_factory.rs +++ b/apollo-router/src/axum_factory/axum_http_server_factory.rs @@ -500,7 +500,7 @@ async fn handle_graphql( service: router::BoxService, http_request: Request, ) -> impl IntoResponse { - tracing::info!(counter.apollo_router_session_count_active = 1,); + tracing::info!(counter.apollo_router_session_count_active = 1i64,); let request: router::Request = http_request.into(); let context = request.context.clone(); @@ -518,7 +518,7 @@ async fn handle_graphql( match res { Err(e) => { - tracing::info!(counter.apollo_router_session_count_active = -1,); + tracing::info!(counter.apollo_router_session_count_active = -1i64,); if let Some(source_err) = e.source() { if source_err.is::() { return RateLimited::new().into_response(); @@ -541,7 +541,7 @@ async fn handle_graphql( .into_response() } Ok(response) => { - tracing::info!(counter.apollo_router_session_count_active = -1,); + tracing::info!(counter.apollo_router_session_count_active = -1i64,); let (mut parts, body) = response.response.into_parts(); let opt_compressor = accept_encoding diff --git a/apollo-router/src/axum_factory/listeners.rs b/apollo-router/src/axum_factory/listeners.rs index 24160afc22..9237fd68db 100644 --- a/apollo-router/src/axum_factory/listeners.rs +++ b/apollo-router/src/axum_factory/listeners.rs @@ -218,7 +218,7 @@ pub(super) fn serve_router_on_listen_addr( } tracing::info!( - counter.apollo_router_session_count_total = 1, + counter.apollo_router_session_count_total = 1i64, listener = &address ); @@ -312,7 +312,7 @@ pub(super) fn serve_router_on_listen_addr( } tracing::info!( - counter.apollo_router_session_count_total = -1, + counter.apollo_router_session_count_total = -1i64, listener = &address ); diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs index 0e8d5e74fb..eb459b92e9 100644 --- a/apollo-router/src/configuration/metrics.rs +++ b/apollo-router/src/configuration/metrics.rs @@ -5,6 +5,7 @@ use std::time::Duration; use jsonpath_rust::JsonPathInst; use paste::paste; +use serde::Serialize; use serde_json::Value; use tokio::sync::OwnedSemaphorePermit; @@ -16,7 +17,39 @@ pub(crate) struct MetricsHandle { pub(crate) struct Metrics { yaml: Value, - metrics: HashMap)>, + metrics: HashMap)>, +} + +enum AttributeValue { + Bool(bool), + U64(u64), + I64(i64), + F64(f64), + String(String), +} + +impl Serialize for AttributeValue { + fn serialize(&self, serializer: S) -> Result { + match self { + AttributeValue::Bool(value) => serializer.serialize_bool(*value), + AttributeValue::U64(value) => serializer.serialize_u64(*value), + AttributeValue::I64(value) => serializer.serialize_i64(*value), + AttributeValue::F64(value) => serializer.serialize_f64(*value), + AttributeValue::String(value) => serializer.serialize_str(value), + } + } +} + +impl AttributeValue { + fn dyn_value(self: &AttributeValue) -> &dyn tracing::Value { + match self { + AttributeValue::Bool(value) => value as &dyn tracing::Value, + AttributeValue::U64(value) => value as &dyn tracing::Value, + AttributeValue::I64(value) => value as &dyn tracing::Value, + AttributeValue::F64(value) => value as &dyn tracing::Value, + AttributeValue::String(value) => value as &dyn tracing::Value, + } + } } 
impl Metrics { @@ -98,12 +131,19 @@ impl Metrics { let attr_name = stringify!([<$($attr __ )+>]).to_string(); match JsonPathInst::from_str($attr_path).expect("json path must be valid").find_slice(value).into_iter().next().as_deref() { // If the value is an object we can only state that it is set, but not what it is set to. - Some(Value::Object(_value)) => {attributes.insert(attr_name, "true".to_string());}, - Some(Value::Array(value)) if !value.is_empty() => {attributes.insert(attr_name, "true".to_string());}, + Some(Value::Object(_value)) => {attributes.insert(attr_name, AttributeValue::Bool(true));}, + Some(Value::Array(value)) if !value.is_empty() => {attributes.insert(attr_name, AttributeValue::Bool(true));}, // Scalars can be logged as is. - Some(value) => {attributes.insert(attr_name, value.to_string());}, + Some(Value::Number(value)) if value.is_f64() => {attributes.insert(attr_name, AttributeValue::F64(value.as_f64().expect("checked, qed")));}, + Some(Value::Number(value)) if value.is_i64() => {attributes.insert(attr_name, AttributeValue::I64(value.as_i64().expect("checked, qed")));}, + Some(Value::Number(value)) => {attributes.insert(attr_name, AttributeValue::U64(value.as_u64().expect("checked, qed")));}, + Some(Value::String(value)) => {attributes.insert(attr_name, AttributeValue::String(value.clone()));}, + Some(Value::Bool(value)) => {attributes.insert(attr_name, AttributeValue::Bool(*value));}, + // If the value is not set we don't specify the attribute. - None => {attributes.insert(attr_name, "false".to_string());}, + None => {attributes.insert(attr_name, AttributeValue::Bool(false));}, + + _ => {}, };)+ (1, attributes) } @@ -113,7 +153,7 @@ impl Metrics { let mut attributes = HashMap::new(); $( let attr_name = stringify!([<$($attr __ )+>]).to_string(); - attributes.insert(attr_name, "false".to_string()); + attributes.insert(attr_name, AttributeValue::Bool(false)); )+ (0, attributes) } @@ -122,7 +162,7 @@ impl Metrics { // Now log the metric paste!{ - tracing::info!($($metric).+ = metric.0, $($($attr).+ = metric.1.get(stringify!([<$($attr __ )+>])).expect("attribute must be in map")),+); + tracing::info!($($metric).+ = metric.0, $($($attr).+ = metric.1.get(stringify!([<$($attr __ )+>])).expect("attribute must be in map").dyn_value()),+); } }; } diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap index bf5efaf603..9108dfc7a1 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@apq.router.yaml.snap @@ -4,7 +4,7 @@ expression: "&metrics.metrics" --- value.apollo.router.config.apq: - 1 - - opt__router__cache__in_memory__: "true" - opt__router__cache__redis__: "true" - opt__subgraph__: "true" + - opt__router__cache__in_memory__: true + opt__router__cache__redis__: true + opt__subgraph__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap index 11f9160614..e45a4962f7 100644 --- 
a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization.router.yaml.snap @@ -4,6 +4,6 @@ expression: "&metrics.metrics" --- value.apollo.router.config.authorization: - 1 - - opt__directives__: "false" - opt__require_authentication__: "true" + - opt__directives__: false + opt__require_authentication__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap index 61b5d4c144..38462ec606 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@authorization_directives.router.yaml.snap @@ -4,6 +4,6 @@ expression: "&metrics.metrics" --- value.apollo.router.config.authorization: - 1 - - opt__directives__: "true" - opt__require_authentication__: "false" + - opt__directives__: true + opt__require_authentication__: false diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap index b5eb1df764..bdc1a7899b 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@coprocessor.router.yaml.snap @@ -4,10 +4,10 @@ expression: "&metrics.metrics" --- value.apollo.router.config.coprocessor: - 1 - - opt__router__request__: "true" - opt__router__response__: "true" - opt__subgraph__request__: "true" - opt__subgraph__response__: "true" - opt__supergraph__request__: "false" - opt__supergraph__response__: "false" + - opt__router__request__: true + opt__router__response__: true + opt__subgraph__request__: true + opt__subgraph__response__: true + opt__supergraph__request__: false + opt__supergraph__response__: false diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap index 1bce92d5c8..e4fe10d957 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap @@ -4,15 +4,15 @@ expression: "&metrics.metrics" --- value.apollo.router.config.entities: - 1 - - opt__cache__: "true" + - opt__cache__: true value.apollo.router.config.traffic_shaping: - 1 - - opt__router__rate_limit__: "false" - opt__router__timout__: "false" - opt__subgraph__compression__: "false" - opt__subgraph__deduplicate_query__: "false" - opt__subgraph__http2__: "false" - opt__subgraph__rate_limit__: "false" - opt__subgraph__retry__: "false" - opt__subgraph__timeout__: "false" + - opt__router__rate_limit__: false + opt__router__timout__: false + 
opt__subgraph__compression__: false + opt__subgraph__deduplicate_query__: false + opt__subgraph__http2__: false + opt__subgraph__rate_limit__: false + opt__subgraph__retry__: false + opt__subgraph__timeout__: false diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap index 53807bab66..055f60152d 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@limits.router.yaml.snap @@ -4,12 +4,12 @@ expression: "&metrics.metrics" --- value.apollo.router.config.limits: - 1 - - opt__operation__max_aliases__: "true" - opt__operation__max_depth__: "true" - opt__operation__max_height__: "true" - opt__operation__max_root_fields__: "true" - opt__operation__warn_only__: "true" - opt__parser__max_recursion__: "true" - opt__parser__max_tokens__: "true" - opt__request__max_size__: "true" + - opt__operation__max_aliases__: true + opt__operation__max_depth__: true + opt__operation__max_height__: true + opt__operation__max_root_fields__: true + opt__operation__warn_only__: true + opt__parser__max_recursion__: true + opt__parser__max_tokens__: true + opt__request__max_size__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap index 507f9c756f..72b803ca49 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@persisted_queries.router.yaml.snap @@ -4,7 +4,7 @@ expression: "&metrics.metrics" --- value.apollo.router.config.persisted_queries: - 1 - - opt__log_unknown__: "true" - opt__safelist__enabled__: "true" - opt__safelist__require_id__: "true" + - opt__log_unknown__: true + opt__safelist__enabled__: true + opt__safelist__require_id__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap index 3709a1603d..a019d34928 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@subscriptions.router.yaml.snap @@ -4,9 +4,9 @@ expression: "&metrics.metrics" --- value.apollo.router.config.subscriptions: - 1 - - opt__deduplication__: "false" - opt__max_opened__: "true" - opt__mode__callback__: "true" - opt__mode__passthrough__: "true" - opt__queue_capacity__: "true" + - opt__deduplication__: false + opt__max_opened__: true + opt__mode__callback__: true + opt__mode__passthrough__: true + opt__queue_capacity__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap 
b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap index 7e02cf7f31..8ea0c00cab 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@telemetry.router.yaml.snap @@ -4,10 +4,10 @@ expression: "&metrics.metrics" --- value.apollo.router.config.telemetry: - 1 - - opt__metrics__otlp__: "true" - opt__metrics__prometheus__: "true" - opt__tracing__datadog__: "true" - opt__tracing__jaeger__: "true" - opt__tracing__otlp__: "true" - opt__tracing__zipkin__: "true" + - opt__metrics__otlp__: true + opt__metrics__prometheus__: true + opt__tracing__datadog__: true + opt__tracing__jaeger__: true + opt__tracing__otlp__: true + opt__tracing__zipkin__: true diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap index 1cdb685e7d..ab53cd0460 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap @@ -4,12 +4,12 @@ expression: "&metrics.metrics" --- value.apollo.router.config.traffic_shaping: - 1 - - opt__router__rate_limit__: "true" - opt__router__timout__: "true" - opt__subgraph__compression__: "true" - opt__subgraph__deduplicate_query__: "true" - opt__subgraph__http2__: "true" - opt__subgraph__rate_limit__: "true" - opt__subgraph__retry__: "true" - opt__subgraph__timeout__: "true" + - opt__router__rate_limit__: true + opt__router__timout__: true + opt__subgraph__compression__: true + opt__subgraph__deduplicate_query__: true + opt__subgraph__http2__: true + opt__subgraph__rate_limit__: true + opt__subgraph__retry__: true + opt__subgraph__timeout__: true diff --git a/apollo-router/src/plugins/telemetry/metrics/layer.rs b/apollo-router/src/plugins/telemetry/metrics/layer.rs index c195891d7b..f80b5552f9 100644 --- a/apollo-router/src/plugins/telemetry/metrics/layer.rs +++ b/apollo-router/src/plugins/telemetry/metrics/layer.rs @@ -24,7 +24,13 @@ use super::METRIC_PREFIX_HISTOGRAM; use super::METRIC_PREFIX_MONOTONIC_COUNTER; use super::METRIC_PREFIX_VALUE; -const I64_MAX: u64 = i64::MAX as u64; +macro_rules! log_and_panic_in_debug_build { + ($($tokens:tt)+) => {{ + tracing::debug!($($tokens)+); + #[cfg(debug_assertions)] + panic!("metric type error, see DEBUG log for details. Release builds will not panic but will still emit a debug log message"); + }}; +} #[derive(Default)] pub(crate) struct Instruments { @@ -159,69 +165,247 @@ pub(crate) struct MetricVisitor<'a> { pub(crate) metric: Option<(&'static str, InstrumentType)>, pub(crate) custom_attributes: Vec, pub(crate) meter: &'a Meter, + attributes_ignored: bool, +} + +impl<'a> MetricVisitor<'a> { + fn set_metric(&mut self, name: &'static str, instrument_type: InstrumentType) { + self.metric = Some((name, instrument_type)); + if self.attributes_ignored { + log_and_panic_in_debug_build!( + metric_name = name, + "metric attributes must be declared after the metric value. 
Some attributes have been ignored" + ); + } + } } impl<'a> Visit for MetricVisitor<'a> { - fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { - // Do not display the log content - if field.name() != "message" { + fn record_f64(&mut self, field: &Field, value: f64) { + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + self.set_metric(metric_name, InstrumentType::CounterF64(value)); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + self.set_metric(metric_name, InstrumentType::UpDownCounterF64(value)); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + self.set_metric(metric_name, InstrumentType::HistogramF64(value)); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + log_and_panic_in_debug_build!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { self.custom_attributes.push(KeyValue::new( Key::from_static_str(field.name()), - Value::from(format!("{value:?}")), + Value::from(value), )); + } else { + self.attributes_ignored = true } } - fn record_str(&mut self, field: &Field, value: &str) { - self.custom_attributes.push(KeyValue::new( - Key::from_static_str(field.name()), - Value::from(value.to_string()), - )); + fn record_i64(&mut self, field: &Field, value: i64) { + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + log_and_panic_in_debug_build!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + self.set_metric(metric_name, InstrumentType::UpDownCounterI64(value)); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + self.set_metric(metric_name, InstrumentType::HistogramI64(value)); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + log_and_panic_in_debug_build!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { + self.custom_attributes.push(KeyValue::new( + Key::from_static_str(field.name()), + Value::from(value), + )); + } else { + self.attributes_ignored = true + } } fn record_u64(&mut self, field: &Field, value: u64) { if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - self.metric = Some((metric_name, InstrumentType::CounterU64(value))); + self.set_metric(metric_name, InstrumentType::CounterU64(value)); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - if value <= I64_MAX { - self.metric = Some((metric_name, InstrumentType::UpDownCounterI64(value as i64))); - } else { - eprintln!( - "[tracing-opentelemetry]: Received Counter metric, but \ - provided u64: {value} is greater than i64::MAX. Ignoring \ - this metric." - ); - } + log_and_panic_in_debug_build!( + metric_name, + "counter must be i64. 
This metric will be ignored" + ); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - self.metric = Some((metric_name, InstrumentType::HistogramU64(value))); + self.set_metric(metric_name, InstrumentType::HistogramU64(value)); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { - self.metric = Some((metric_name, InstrumentType::GaugeU64(value))); + self.set_metric(metric_name, InstrumentType::GaugeU64(value)); + } else if self.metric.is_some() { + log_and_panic_in_debug_build!( + name = field.name(), + "metric attribute must be i64, f64, string or bool. This attribute will be ignored" + ); } else { - self.record_debug(field, &value); + self.attributes_ignored = true } } - fn record_f64(&mut self, field: &Field, value: f64) { + fn record_i128(&mut self, field: &Field, _value: i128) { if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - self.metric = Some((metric_name, InstrumentType::CounterF64(value))); + log_and_panic_in_debug_build!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - self.metric = Some((metric_name, InstrumentType::UpDownCounterF64(value))); + log_and_panic_in_debug_build!( + metric_name, + "counter must be i64. This metric will be ignored" + ); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - self.metric = Some((metric_name, InstrumentType::HistogramF64(value))); + log_and_panic_in_debug_build!( + metric_name, + "histogram must be u64, i64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + log_and_panic_in_debug_build!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { + log_and_panic_in_debug_build!( + name = field.name(), + "metric attribute must be i64, f64, string or bool. This attribute will be ignored" + ); } else { - self.record_debug(field, &value); + self.attributes_ignored = true } } - fn record_i64(&mut self, field: &Field, value: i64) { + fn record_u128(&mut self, field: &Field, _value: u128) { if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { - self.metric = Some((metric_name, InstrumentType::CounterU64(value as u64))); + log_and_panic_in_debug_build!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { - self.metric = Some((metric_name, InstrumentType::UpDownCounterI64(value))); + log_and_panic_in_debug_build!( + metric_name, + "counter must be i64. This metric will be ignored" + ); } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { - self.metric = Some((metric_name, InstrumentType::HistogramI64(value))); + log_and_panic_in_debug_build!( + metric_name, + "histogram must be u64, i64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + log_and_panic_in_debug_build!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { + log_and_panic_in_debug_build!( + name = field.name(), + "metric attribute must be i64, f64, string or bool. 
This attribute will be ignored" + ); } else { - self.record_debug(field, &value); + self.attributes_ignored = true + } + } + + fn record_bool(&mut self, field: &Field, value: bool) { + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + log_and_panic_in_debug_build!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + log_and_panic_in_debug_build!( + metric_name, + "counter must be i64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + log_and_panic_in_debug_build!( + metric_name, + "histogram must be u64, i64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + log_and_panic_in_debug_build!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { + self.custom_attributes.push(KeyValue::new( + Key::from_static_str(field.name()), + Value::from(value), + )); + } else { + self.attributes_ignored = true + } + } + + fn record_str(&mut self, field: &Field, value: &str) { + if field.name() != "message" { + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + log_and_panic_in_debug_build!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + log_and_panic_in_debug_build!( + metric_name, + "counter must be i64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + log_and_panic_in_debug_build!( + metric_name, + "histogram must be u64, i64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + log_and_panic_in_debug_build!( + metric_name, + "gauge must be u64. This metric will be ignored" + ); + } else if self.metric.is_some() { + self.custom_attributes.push(KeyValue::new( + Key::from_static_str(field.name()), + Value::from(value.to_string()), + )); + } else { + self.attributes_ignored = true + } + } + } + + fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { + if field.name() != "message" { + if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_MONOTONIC_COUNTER) { + log_and_panic_in_debug_build!( + metric_name, + "monotonic counter must be u64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_COUNTER) { + log_and_panic_in_debug_build!( + metric_name, + "counter must be i64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_HISTOGRAM) { + log_and_panic_in_debug_build!( + metric_name, + "histogram must be u64, i64 or f64. This metric will be ignored" + ); + } else if let Some(metric_name) = field.name().strip_prefix(METRIC_PREFIX_VALUE) { + log_and_panic_in_debug_build!( + metric_name, + "gauge must be u64. 
This metric will be ignored" + ); + } else if self.metric.is_some() { + self.custom_attributes.push(KeyValue::new( + Key::from_static_str(field.name()), + Value::from(format!("{value:?}")), + )); + } else { + self.attributes_ignored = true + } } } } @@ -265,6 +449,7 @@ where meter: &self.meter, metric: None, custom_attributes: Vec::new(), + attributes_ignored: false, }; event.record(&mut metric_visitor); metric_visitor.finish(); diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index c96157e4e4..0f46df3750 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -853,7 +853,7 @@ impl Telemetry { } ::tracing::info!( monotonic_counter.apollo.router.operations = 1u64, - http.response.status_code = parts.status.as_u16(), + http.response.status_code = parts.status.as_u16() as i64, ); let response = http::Response::from_parts( parts, @@ -869,7 +869,7 @@ impl Telemetry { ::tracing::info!( monotonic_counter.apollo.router.operations = 1u64, - http.response.status_code = 500, + http.response.status_code = 500i64, ); Err(err) } diff --git a/apollo-router/src/plugins/traffic_shaping/timeout/future.rs b/apollo-router/src/plugins/traffic_shaping/timeout/future.rs index 924fe6b215..8a390b393e 100644 --- a/apollo-router/src/plugins/traffic_shaping/timeout/future.rs +++ b/apollo-router/src/plugins/traffic_shaping/timeout/future.rs @@ -49,7 +49,7 @@ where match Pin::new(&mut this.sleep).poll(cx) { Poll::Pending => Poll::Pending, Poll::Ready(_) => { - tracing::info!(monotonic_counter.apollo_router_timeout = 1,); + tracing::info!(monotonic_counter.apollo_router_timeout = 1u64,); Poll::Ready(Err(Elapsed::new().into())) } } diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index e4929420de..7805d127f9 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -91,7 +91,7 @@ impl BridgeQueryPlanner { if has_validation_errors && !schema.has_errors() { tracing::warn!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_SCHEMA, validation.result = VALIDATION_FALSE_NEGATIVE, "validation mismatch: JS query planner reported a schema validation error, but apollo-rs did not" @@ -106,7 +106,7 @@ impl BridgeQueryPlanner { if configuration.experimental_graphql_validation_mode == GraphQLValidationMode::Both { if schema.has_errors() { tracing::warn!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_SCHEMA, validation.result = VALIDATION_FALSE_POSITIVE, "validation mismatch: apollo-rs reported a schema validation error, but JS query planner did not" @@ -114,7 +114,7 @@ impl BridgeQueryPlanner { } else { // false_negative was an early return so we know it was correct here tracing::info!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_SCHEMA, validation.result = VALIDATION_MATCH ); @@ -286,7 +286,7 @@ impl BridgeQueryPlanner { match (is_validation_error, &selections.validation_error) { (false, Some(_)) => { tracing::warn!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_OPERATION, validation.result = 
VALIDATION_FALSE_POSITIVE, "validation mismatch: JS query planner did not report query validation error, but apollo-rs did" @@ -294,7 +294,7 @@ impl BridgeQueryPlanner { } (true, None) => { tracing::warn!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_OPERATION, validation.result = VALIDATION_FALSE_NEGATIVE, "validation mismatch: apollo-rs did not report query validation error, but JS query planner did" @@ -302,7 +302,7 @@ impl BridgeQueryPlanner { } // if JS and Rust implementations agree, we return the JS result for now. _ => tracing::info!( - monotonic_counter.apollo.router.validation = 1, + monotonic_counter.apollo.router.validation = 1u64, validation.source = VALIDATION_SOURCE_OPERATION, validation.result = VALIDATION_MATCH, ), diff --git a/apollo-router/src/uplink/mod.rs b/apollo-router/src/uplink/mod.rs index b72ef91935..2fb38ef4d1 100644 --- a/apollo-router/src/uplink/mod.rs +++ b/apollo-router/src/uplink/mod.rs @@ -196,7 +196,7 @@ where match fetch::(&client, &query_body, &mut endpoints.iter()).await { Ok(response) => { tracing::info!( - counter.apollo_router_uplink_fetch_count_total = 1, + monotonic_counter.apollo_router_uplink_fetch_count_total = 1u64, status = "success", query ); @@ -245,7 +245,7 @@ where } Err(err) => { tracing::info!( - counter.apollo_router_uplink_fetch_count_total = 1, + monotonic_counter.apollo_router_uplink_fetch_count_total = 1u64, status = "failure", query ); From 0cceca78afd55f1136d77a64bb72b72b0a6e51de Mon Sep 17 00:00:00 2001 From: Jeremy Lempereur Date: Wed, 6 Sep 2023 13:09:34 +0200 Subject: [PATCH 64/81] Update rust toolchain to 1.72.0 (#3707) Update rust toolchain to 1.72.0 --- .changesets/maint_igni_rust_1_72_0.md | 5 +++++ Cargo.toml | 2 +- apollo-router-scaffold/templates/base/Dockerfile | 2 +- apollo-router/Cargo.toml | 2 +- apollo-router/README.md | 2 +- apollo-router/src/json_ext.rs | 2 +- apollo-router/src/plugins/authorization/mod.rs | 6 +++--- apollo-router/src/plugins/coprocessor_test.rs | 16 ++++++++-------- apollo-router/src/query_planner/execution.rs | 6 +++--- apollo-router/src/query_planner/labeler.rs | 5 +++-- apollo-router/src/query_planner/plan.rs | 2 +- apollo-router/src/services/execution_service.rs | 4 ++-- apollo-router/src/spec/operation_limits.rs | 5 ++--- apollo-router/src/spec/query/transform.rs | 15 +++++++++------ apollo-router/src/uplink/mod.rs | 2 +- docs/source/customizations/custom-binary.mdx | 2 +- rust-toolchain.toml | 4 ++-- 17 files changed, 45 insertions(+), 37 deletions(-) create mode 100644 .changesets/maint_igni_rust_1_72_0.md diff --git a/.changesets/maint_igni_rust_1_72_0.md b/.changesets/maint_igni_rust_1_72_0.md new file mode 100644 index 0000000000..9371f0dcfa --- /dev/null +++ b/.changesets/maint_igni_rust_1_72_0.md @@ -0,0 +1,5 @@ +### Update rust toolchain to 1.72.0 ([PR #3707](https://github.com/apollographql/router/pull/3707)) + +The router-bridge update now allows us to use the latest rust version. 
+ +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3707 diff --git a/Cargo.toml b/Cargo.toml index 9c44480765..4861dbc154 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,5 @@ [workspace] +resolver = "2" default-members = ["apollo-router"] members = [ "apollo-router", @@ -40,4 +41,3 @@ incremental = false [profile.release-dhat] inherits = "release" debug = 1 - diff --git a/apollo-router-scaffold/templates/base/Dockerfile b/apollo-router-scaffold/templates/base/Dockerfile index 3514b700dc..db76b6d0bb 100644 --- a/apollo-router-scaffold/templates/base/Dockerfile +++ b/apollo-router-scaffold/templates/base/Dockerfile @@ -1,6 +1,6 @@ # Use the rust build image from docker as our base # renovate-automation: rustc version -FROM rust:1.71.1 as build +FROM rust:1.72.0 as build # Set our working directory for the build WORKDIR /usr/src/router diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 7e8d6fed3e..4f91e8a2dc 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -8,7 +8,7 @@ description = "A configurable, high-performance routing runtime for Apollo Feder license = "Elastic-2.0" # renovate-automation: rustc version -rust-version = "1.71.1" +rust-version = "1.72.0" edition = "2021" build = "build/main.rs" diff --git a/apollo-router/README.md b/apollo-router/README.md index b2e5cec865..4934b8a099 100644 --- a/apollo-router/README.md +++ b/apollo-router/README.md @@ -27,4 +27,4 @@ Most Apollo Router features can be defined using our [YAML configuration](https: If you prefer to write customizations in Rust or need more advanced customizations, see our section on [native customizations](https://www.apollographql.com/docs/router/customizations/native) for information on how to use `apollo-router` as a Rust library. We also publish Rust-specific documentation on our [`apollo-router` crate docs](https://docs.rs/crate/apollo-router). -The minimum supported Rust version (MSRV) for this version of `apollo-router` is **1.71.1**. +The minimum supported Rust version (MSRV) for this version of `apollo-router` is **1.72.0**. 
diff --git a/apollo-router/src/json_ext.rs b/apollo-router/src/json_ext.rs index e05b010550..6955f15924 100644 --- a/apollo-router/src/json_ext.rs +++ b/apollo-router/src/json_ext.rs @@ -144,7 +144,7 @@ impl ValueExt for Value { a_value.deep_merge(b_value); } - a.extend(b.into_iter()); + a.extend(b); } (_, Value::Null) => {} (Value::Object(_), Value::Array(_)) => { diff --git a/apollo-router/src/plugins/authorization/mod.rs b/apollo-router/src/plugins/authorization/mod.rs index babcc91c1b..7bc42dd114 100644 --- a/apollo-router/src/plugins/authorization/mod.rs +++ b/apollo-router/src/plugins/authorization/mod.rs @@ -252,7 +252,7 @@ impl AuthorizationPlugin { let compiler = match filter_res { None => compiler, Some((query, paths)) => { - unauthorized_paths.extend(paths.into_iter()); + unauthorized_paths.extend(paths); if query.is_empty() { return Err(QueryPlannerError::Unauthorized(unauthorized_paths)); @@ -272,7 +272,7 @@ impl AuthorizationPlugin { let compiler = match filter_res { None => compiler, Some((query, paths)) => { - unauthorized_paths.extend(paths.into_iter()); + unauthorized_paths.extend(paths); if query.is_empty() { return Err(QueryPlannerError::Unauthorized(unauthorized_paths)); @@ -292,7 +292,7 @@ impl AuthorizationPlugin { let compiler = match filter_res { None => compiler, Some((query, paths)) => { - unauthorized_paths.extend(paths.into_iter()); + unauthorized_paths.extend(paths); if query.is_empty() { return Err(QueryPlannerError::Unauthorized(unauthorized_paths)); diff --git a/apollo-router/src/plugins/coprocessor_test.rs b/apollo-router/src/plugins/coprocessor_test.rs index 297391af5e..26ef229eb7 100644 --- a/apollo-router/src/plugins/coprocessor_test.rs +++ b/apollo-router/src/plugins/coprocessor_test.rs @@ -287,7 +287,7 @@ mod tests { Box::pin(async { Ok(hyper::Response::builder() .body(Body::from( - r##"{ + r#"{ "version": 1, "stage": "SubgraphRequest", "control": { @@ -299,7 +299,7 @@ mod tests { "body": "Errors need a message, this will fail to deserialize" }] } - }"##, + }"#, )) .unwrap()) }) @@ -386,7 +386,7 @@ mod tests { Box::pin(async { Ok(hyper::Response::builder() .body(Body::from( - r##"{ + r#"{ "version": 1, "stage": "SubgraphRequest", "control": "continue", @@ -429,7 +429,7 @@ mod tests { }, "serviceName": "service name shouldn't change", "uri": "http://thisurihaschanged" - }"##, + }"#, )) .unwrap()) }) @@ -478,7 +478,7 @@ mod tests { Box::pin(async { Ok(hyper::Response::builder() .body(Body::from( - r##"{ + r#"{ "version": 1, "stage": "SubgraphRequest", "control": { @@ -495,7 +495,7 @@ mod tests { "headers": { "aheader": ["a value"] } - }"##, + }"#, )) .unwrap()) }) @@ -556,7 +556,7 @@ mod tests { Box::pin(async { Ok(hyper::Response::builder() .body(Body::from( - r##"{ + r#"{ "version": 1, "stage": "SubgraphResponse", "headers": { @@ -598,7 +598,7 @@ mod tests { "this-is-a-test-context": 42 } } - }"##, + }"#, )) .unwrap()) }) diff --git a/apollo-router/src/query_planner/execution.rs b/apollo-router/src/query_planner/execution.rs index 0cb52f333f..35945844cc 100644 --- a/apollo-router/src/query_planner/execution.rs +++ b/apollo-router/src/query_planner/execution.rs @@ -439,7 +439,7 @@ impl DeferredNode { let (primary_value, primary_errors) = primary_receiver.recv().await.unwrap_or_default(); value.deep_merge(primary_value); - errors.extend(primary_errors.into_iter()) + errors.extend(primary_errors) } else { while let Some((v, _remaining)) = stream.next().await { // a Err(RecvError) means either that the fetch was not performed and the @@ -486,7 +486,7 @@ 
impl DeferredNode { let (primary_value, primary_errors) = primary_receiver.recv().await.unwrap_or_default(); v.deep_merge(primary_value); - errors.extend(primary_errors.into_iter()) + errors.extend(primary_errors) } if let Err(e) = tx @@ -511,7 +511,7 @@ impl DeferredNode { let (primary_value, primary_errors) = primary_receiver.recv().await.unwrap_or_default(); value.deep_merge(primary_value); - errors.extend(primary_errors.into_iter()); + errors.extend(primary_errors); if let Err(e) = tx .send( diff --git a/apollo-router/src/query_planner/labeler.rs b/apollo-router/src/query_planner/labeler.rs index cdf7cf4ea6..7bbb095eb7 100644 --- a/apollo-router/src/query_planner/labeler.rs +++ b/apollo-router/src/query_planner/labeler.rs @@ -65,8 +65,9 @@ impl<'a> Visitor for Labeler<'a> { ) -> Result, BoxError> { let parent_type = hir.type_condition().unwrap_or(parent_type); - let Some(selection_set) = selection_set(self, hir.selection_set(), parent_type)? - else { return Ok(None) }; + let Some(selection_set) = selection_set(self, hir.selection_set(), parent_type)? else { + return Ok(None); + }; let mut encoder_node = apollo_encoder::InlineFragment::new(selection_set); diff --git a/apollo-router/src/query_planner/plan.rs b/apollo-router/src/query_planner/plan.rs index b2f87ffe48..803ceaab3b 100644 --- a/apollo-router/src/query_planner/plan.rs +++ b/apollo-router/src/query_planner/plan.rs @@ -234,7 +234,7 @@ impl PlanNode { Self::Subscription { primary, rest } => match rest { Some(rest) => Box::new( rest.service_usage() - .chain(Some(primary.service_name.as_str()).into_iter()), + .chain(Some(primary.service_name.as_str())), ) as Box + 'a>, None => Box::new(Some(primary.service_name.as_str()).into_iter()), }, diff --git a/apollo-router/src/services/execution_service.rs b/apollo-router/src/services/execution_service.rs index 5e38fab3a6..3de41b2891 100644 --- a/apollo-router/src/services/execution_service.rs +++ b/apollo-router/src/services/execution_service.rs @@ -267,9 +267,9 @@ impl ExecutionService { schema.api_schema(), variables_set, ) - .into_iter(), + , ); - nullified_paths.extend(paths.into_iter()); + nullified_paths.extend(paths); }); match (response.path.as_ref(), response.data.as_ref()) { diff --git a/apollo-router/src/spec/operation_limits.rs b/apollo-router/src/spec/operation_limits.rs index e0ed96f15a..c6815382a9 100644 --- a/apollo-router/src/spec/operation_limits.rs +++ b/apollo-router/src/spec/operation_limits.rs @@ -81,12 +81,11 @@ pub(crate) fn check( debug_assert_eq!(ids.len(), 1); let query_id = ids[0]; - let Some(operation) = compiler.db.find_operation(query_id, operation_name.clone()) - else { + let Some(operation) = compiler.db.find_operation(query_id, operation_name.clone()) else { // Undefined or ambiguous operation name. // The request is invalid and will be rejected by some other part of the router, // if it wasn’t already before we got to this code path. - return Ok(()) + return Ok(()); }; let mut fragment_cache = HashMap::new(); diff --git a/apollo-router/src/spec/query/transform.rs b/apollo-router/src/spec/query/transform.rs index 3ece3b907a..76bfe0f9be 100644 --- a/apollo-router/src/spec/query/transform.rs +++ b/apollo-router/src/spec/query/transform.rs @@ -116,8 +116,9 @@ pub(crate) fn operation( .ok_or("ObjectTypeDefMissing")?; let type_name = object_type.name(); - let Some(selection_set) = selection_set(visitor, def.selection_set(), type_name)? - else { return Ok(None) }; + let Some(selection_set) = selection_set(visitor, def.selection_set(), type_name)? 
else { + return Ok(None); + }; let mut encoder_node = apollo_encoder::OperationDefinition::new(operation_type(def.operation_ty()), selection_set); @@ -151,8 +152,9 @@ pub(crate) fn fragment_definition( let name = hir.name(); let type_condition = hir.type_condition(); - let Some(selection_set) = selection_set(visitor, hir.selection_set(), type_condition)? - else { return Ok(None) }; + let Some(selection_set) = selection_set(visitor, hir.selection_set(), type_condition)? else { + return Ok(None); + }; let type_condition = apollo_encoder::TypeCondition::new(type_condition.into()); let mut encoder_node = @@ -237,8 +239,9 @@ pub(crate) fn inline_fragment( ) -> Result, BoxError> { let parent_type = hir.type_condition().unwrap_or(parent_type); - let Some(selection_set) = selection_set(visitor, hir.selection_set(), parent_type)? - else { return Ok(None) }; + let Some(selection_set) = selection_set(visitor, hir.selection_set(), parent_type)? else { + return Ok(None); + }; let mut encoder_node = apollo_encoder::InlineFragment::new(selection_set); diff --git a/apollo-router/src/uplink/mod.rs b/apollo-router/src/uplink/mod.rs index 2fb38ef4d1..1399a6b606 100644 --- a/apollo-router/src/uplink/mod.rs +++ b/apollo-router/src/uplink/mod.rs @@ -78,7 +78,7 @@ pub enum Endpoints { impl Default for Endpoints { fn default() -> Self { Self::fallback( - vec![GCP_URL, AWS_URL] + [GCP_URL, AWS_URL] .iter() .map(|url| Url::parse(url).expect("default urls must be valid")) .collect(), diff --git a/docs/source/customizations/custom-binary.mdx b/docs/source/customizations/custom-binary.mdx index 0934ac7cca..e5a7750124 100644 --- a/docs/source/customizations/custom-binary.mdx +++ b/docs/source/customizations/custom-binary.mdx @@ -20,7 +20,7 @@ import ElasticNotice from '../../shared/elastic-notice.mdx'; To compile the Apollo Router, you need to have the following installed: -* [Rust 1.71.1 or later](https://www.rust-lang.org/tools/install) +* [Rust 1.72.0 or later](https://www.rust-lang.org/tools/install) * [Node.js 16.9.1 or later](https://nodejs.org/en/download/) * [CMake 3.5.1 or later](https://cmake.org/download/) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index d502368c84..4bfc48e551 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] # renovate-automation: rustc version -channel = "1.71.1" -components = [ "rustfmt", "clippy" ] +channel = "1.72.0" +components = ["rustfmt", "clippy"] From 961fff4df04aad2189c8b534ed74e563c06ad08a Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Wed, 6 Sep 2023 14:20:57 +0200 Subject: [PATCH 65/81] Add experimental caching metrics (#3558) Fix #3554 This creates a new metric recorded only if we set the configuration option `telemetry.metrics.common.experimental_cache_metrics.enabled` to `true`. * `apollo.router.operations.entity` (histogram): cache hit ratio per subgraph and entity type This simulates an entity cache to find out if it would be useful. Each time we do a subgraph query, we use as cache key: - subgraph name - entity type - query - vary headers - entity key We record if we have seen this entity before (using a bloom filter) and calculate the cache hit ratio for that query, per subgraph and entity type. 
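
As a rough illustration of the simulation described above (a sketch only, not the exact `CacheCounter` this patch adds — the real one also keys on the subgraph name, query hash and vary headers, and rotates a second bloom filter when the TTL expires), the per-entity-type hit ratio can be computed along these lines, assuming the `bloomfilter` crate introduced by this change:

```rust
// Minimal sketch of the simulated entity cache; field names and sizes are
// illustrative, not the router's actual cache key.
use std::collections::HashMap;

use bloomfilter::Bloom;

/// Illustrative key: the real key carries more fields (query, headers, subgraph).
#[derive(Hash)]
struct EntityKey {
    typename: String,
    hashed_representation: String,
}

struct HitRatioCounter {
    seen: Bloom<EntityKey>,
}

impl HitRatioCounter {
    fn new() -> Self {
        // ~10k expected entries at a 20% false-positive rate: a few kB of memory.
        Self {
            seen: Bloom::new_for_fp_rate(10_000, 0.2),
        }
    }

    /// Records one batch of entity keys from a subgraph fetch and returns the
    /// simulated cache-hit ratio (0.0..=1.0) per entity type.
    fn record(&mut self, keys: Vec<EntityKey>) -> HashMap<String, f64> {
        let mut per_type: HashMap<String, (usize, usize)> = HashMap::new();
        for key in keys {
            let (hits, total) = per_type.entry(key.typename.clone()).or_insert((0, 0));
            // `check_and_set` reports whether the key was (probably) seen before,
            // and marks it as seen for later requests.
            if self.seen.check_and_set(&key) {
                *hits += 1;
            }
            *total += 1;
        }
        per_type
            .into_iter()
            .map(|(ty, (hits, total))| (ty, hits as f64 / total as f64))
            .collect()
    }
}
```

Rotating in a fresh filter after the TTL (as the actual patch does) keeps memory bounded while letting old entries "expire", at the cost of the bloom filter's false-positive rate on the hit ratio.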
--------- Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> Co-authored-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- .changesets/maint_bnjjj_caching_metrics.md | 38 +++ Cargo.lock | 18 ++ apollo-router/Cargo.toml | 3 +- ...nfiguration__tests__schema_generation.snap | 17 ++ apollo-router/src/plugins/telemetry/config.rs | 23 ++ apollo-router/src/plugins/telemetry/mod.rs | 227 +++++++++++++++++- .../telemetry/tracing/apollo_telemetry.rs | 2 +- .../src/plugins/traffic_shaping/cache.rs | 91 +++++-- .../src/plugins/traffic_shaping/mod.rs | 5 +- 9 files changed, 393 insertions(+), 31 deletions(-) create mode 100644 .changesets/maint_bnjjj_caching_metrics.md diff --git a/.changesets/maint_bnjjj_caching_metrics.md b/.changesets/maint_bnjjj_caching_metrics.md new file mode 100644 index 0000000000..cd09700cfd --- /dev/null +++ b/.changesets/maint_bnjjj_caching_metrics.md @@ -0,0 +1,38 @@ +### Add experimental caching metrics ([PR #3532](https://github.com/apollographql/router/pull/3532)) + +It adds a metric only if you configure `telemetry.metrics.common.experimental_cache_metrics.enabled` to `true`. It will generate metrics to evaluate which entities would benefit from caching. It simulates a cache with a TTL, configurable at `telemetry.metrics.common.experimental_cache_metrics.ttl` (default: 5 seconds), and measures the cache hit rate per entity type and subgraph. + +example + +``` +# HELP apollo.router.operations.entity.cache_hit +# TYPE apollo_router_operations_entity.cache_hit histogram +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="0.05"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="0.1"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="0.25"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="0.5"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="1"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="2.5"} 3 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="5"} 4 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="10"} 4 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="20"} 4 +apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="1000"} 4 
+apollo_router_operations_entity_cache_hitbucket{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version="",le="+Inf"} 4 +apollo_router_operations_entity_cache_hitsum{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version=""} 7 +apollo_router_operations_entity_cache_hitcount{entity_type="Product",service_name="apollo-router",subgraph="products",otel_scope_name="apollo/router",otel_scope_version=""} 4 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="0.05"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="0.1"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="0.25"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="0.5"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="1"} 0 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="2.5"} 1 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="5"} 1 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="10"} 1 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="20"} 1 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="1000"} 1 +apollo_router_operations_entity_cache_hitbucket{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version="",le="+Inf"} 1 +apollo_router_operations_entity_cache_hitsum{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version=""} 1 +apollo_router_operations_entity_cache_hitcount{entity_type="User",service_name="apollo-router",subgraph="users",otel_scope_name="apollo/router",otel_scope_version=""} 1 +``` + +By [@bnjjj](https://github.com/bnjjj) [@Geal](https://github.com/geal) in https://github.com/apollographql/router/pull/3532 \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 44bc38cd31..212d0f17d8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -281,6 +281,7 @@ dependencies = [ "aws-types", "axum", "base64 0.21.2", + "bloomfilter", "brotli", "buildstructor 0.5.3", "bytes", @@ -1098,6 +1099,17 @@ dependencies = [ "generic-array 0.14.7", ] +[[package]] +name = "bloomfilter" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b92db7965d438b8b4b1c1d0aedd188440a1084593c9eb7f6657e3df7e906d934" +dependencies = [ + "bit-vec", + 
"getrandom 0.2.10", + "siphasher", +] + [[package]] name = "brotli" version = "3.3.4" @@ -5793,6 +5805,12 @@ dependencies = [ "time", ] +[[package]] +name = "siphasher" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54ac45299ccbd390721be55b412d41931911f654fa99e2cb8bfb57184b2061fe" + [[package]] name = "slab" version = "0.4.8" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 4f91e8a2dc..b9abf26a6b 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -67,6 +67,7 @@ async-trait = "0.1.73" atty = "0.2.14" axum = { version = "0.6.20", features = ["headers", "json", "original-uri"] } base64 = "0.21.2" +bloomfilter = "1.0.12" buildstructor = "0.5.3" bytes = "1.4.0" clap = { version = "4.4.2", default-features = false, features = [ @@ -163,6 +164,7 @@ prost = "0.11.9" prost-types = "0.11.9" proteus = "0.5.0" rand = "0.8.5" +rand_core = "0.6.4" rhai = { version = "1.15.1", features = ["sync", "serde", "internals"] } regex = "1.9.5" reqwest = { version = "0.11.19", default-features = false, features = [ @@ -236,7 +238,6 @@ memchr = "2.6.3" brotli = "3.3.4" zstd = "0.12.4" zstd-safe = "6.0.6" -rand_core = "0.6.4" # note: AWS dependencies should always use the same version aws-sigv4 = "0.56.0" aws-credential-types = "0.56.0" diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index e54c9226cd..ff9123ab31 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -4314,6 +4314,23 @@ expression: "&schema" "format": "double" } }, + "experimental_cache_metrics": { + "description": "Experimental metrics to know more about caching strategies", + "type": "object", + "properties": { + "enabled": { + "description": "Enable experimental metrics", + "default": false, + "type": "boolean" + }, + "ttl": { + "description": "Potential TTL for a cache if we had one (default: 5secs)", + "default": "5s", + "type": "string" + } + }, + "additionalProperties": false + }, "resources": { "description": "Resources", "default": {}, diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs index 11e76f1bcf..c0cff8118f 100644 --- a/apollo-router/src/plugins/telemetry/config.rs +++ b/apollo-router/src/plugins/telemetry/config.rs @@ -94,6 +94,28 @@ pub(crate) struct MetricsCommon { /// Custom buckets for histograms #[serde(default = "default_buckets")] pub(crate) buckets: Vec, + /// Experimental metrics to know more about caching strategies + pub(crate) experimental_cache_metrics: ExperimentalCacheMetricsConf, +} + +#[derive(Clone, Debug, Deserialize, JsonSchema)] +#[serde(deny_unknown_fields, rename_all = "snake_case", default)] +pub(crate) struct ExperimentalCacheMetricsConf { + /// Enable experimental metrics + pub(crate) enabled: bool, + #[serde(with = "humantime_serde")] + #[schemars(with = "String")] + /// Potential TTL for a cache if we had one (default: 5secs) + pub(crate) ttl: Duration, +} + +impl Default for ExperimentalCacheMetricsConf { + fn default() -> Self { + Self { + enabled: false, + ttl: Duration::from_secs(5), + } + } } fn default_buckets() -> Vec { @@ -110,6 +132,7 @@ impl Default for MetricsCommon { service_namespace: None, resources: 
HashMap::new(), buckets: default_buckets(), + experimental_cache_metrics: ExperimentalCacheMetricsConf::default(), } } } diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index 0f46df3750..67450787e9 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -11,6 +11,7 @@ use ::tracing::field; use ::tracing::info_span; use ::tracing::Span; use axum::headers::HeaderName; +use bloomfilter::Bloom; use dashmap::DashMap; use futures::future::ready; use futures::future::BoxFuture; @@ -38,6 +39,7 @@ use opentelemetry::trace::TraceState; use opentelemetry::trace::TracerProvider; use opentelemetry::Context as OtelContext; use opentelemetry::KeyValue; +use parking_lot::Mutex; use rand::Rng; use router_bridge::planner::UsageReporting; use serde_json_bytes::json; @@ -72,6 +74,9 @@ use self::reload::NullFieldFormatter; use self::reload::SamplingFilter; use self::reload::OPENTELEMETRY_TRACER_HANDLE; use self::tracing::apollo_telemetry::APOLLO_PRIVATE_DURATION_NS; +use super::traffic_shaping::cache::hash_request; +use super::traffic_shaping::cache::hash_vary_headers; +use super::traffic_shaping::cache::REPRESENTATIONS; use crate::axum_factory::utils::REQUEST_SPAN_NAME; use crate::context::OPERATION_NAME; use crate::layers::ServiceBuilderExt; @@ -117,6 +122,7 @@ use crate::services::SubgraphRequest; use crate::services::SubgraphResponse; use crate::services::SupergraphRequest; use crate::services::SupergraphResponse; +use crate::spec::TYPENAME; use crate::tracer::TraceId; use crate::Context; use crate::ListenAddr; @@ -162,6 +168,7 @@ pub(crate) struct Telemetry { tracer_provider: Option, meter_provider: AggregateMeterProvider, + counter: Option>>, } #[derive(Debug)] @@ -244,7 +251,21 @@ impl Plugin for Telemetry { config.calculate_field_level_instrumentation_ratio()?; let mut metrics_builder = Self::create_metrics_builder(&config)?; let meter_provider = metrics_builder.meter_provider(); + let counter = config + .metrics + .as_ref() + .and_then(|m| m.common.as_ref()) + .and_then(|c| { + if c.experimental_cache_metrics.enabled { + Some(Arc::new(Mutex::new(CacheCounter::new( + c.experimental_cache_metrics.ttl, + )))) + } else { + None + } + }); let (sampling_filter_ratio, tracer_provider) = Self::create_tracer_provider(&config)?; + Ok(Telemetry { custom_endpoints: metrics_builder.custom_endpoints(), metrics_exporters: metrics_builder.exporters(), @@ -255,6 +276,7 @@ impl Plugin for Telemetry { meter_provider, sampling_filter_ratio, config: Arc::new(config), + counter, }) } @@ -477,7 +499,10 @@ impl Plugin for Telemetry { let subgraph_metrics_conf_req = self.create_subgraph_metrics_conf(name); let subgraph_metrics_conf_resp = subgraph_metrics_conf_req.clone(); let subgraph_name = ByteString::from(name); + let cache_metrics_enabled = self.counter.is_some(); + let counter = self.counter.clone(); let name = name.to_owned(); + let subgraph_name_arc = Arc::new(name.to_owned()); ServiceBuilder::new() .instrument(move |req: &SubgraphRequest| { let query = req @@ -502,7 +527,16 @@ impl Plugin for Telemetry { "apollo_private.ftv1" = field::Empty ) }) - .map_request(request_ftv1) + .map_request(move |mut req: SubgraphRequest| { + let cache_attributes = cache_metrics_enabled + .then(|| Self::get_cache_attributes(subgraph_name_arc.clone(), &mut req)) + .flatten(); + if let Some(cache_attributes) = cache_attributes { + req.context.private_entries.lock().insert(cache_attributes); + } + + request_ftv1(req) + }) 
.map_response(move |resp| store_ftv1(&subgraph_name, resp)) .map_future_with_request_data( move |sub_request: &SubgraphRequest| { @@ -510,13 +544,16 @@ impl Plugin for Telemetry { subgraph_metrics_conf_req.clone(), sub_request, ); - sub_request.context.clone() + let cache_attributes = sub_request.context.private_entries.lock().remove(); + + (sub_request.context.clone(), cache_attributes) }, - move |context: Context, + move |(context, cache_attributes): (Context, Option), f: BoxFuture<'static, Result>| { let metrics = metrics.clone(); let subgraph_attribute = subgraph_attribute.clone(); let subgraph_metrics_conf = subgraph_metrics_conf_resp.clone(); + let counter = counter.clone(); // Using Instant because it is guaranteed to be monotonically increasing. let now = Instant::now(); f.map(move |result: Result| { @@ -526,6 +563,8 @@ impl Plugin for Telemetry { subgraph_attribute, subgraph_metrics_conf, now, + counter, + cache_attributes, &result, ); result @@ -1032,6 +1071,63 @@ impl Telemetry { ) } + fn get_cache_attributes( + subgraph_name: Arc, + sub_request: &mut Request, + ) -> Option { + let body = dbg!(sub_request.subgraph_request.body_mut()); + let hashed_query = hash_request(body); + let representations = body + .variables + .get(REPRESENTATIONS) + .and_then(|value| value.as_array())?; + + let keys = extract_cache_attributes(representations).ok()?; + + Some(CacheAttributes { + subgraph_name, + headers: sub_request.subgraph_request.headers().clone(), + hashed_query: Arc::new(hashed_query), + representations: keys, + }) + } + + fn update_cache_metrics( + counter: Arc>, + sub_response: &SubgraphResponse, + cache_attributes: CacheAttributes, + ) { + let mut vary_headers = sub_response + .response + .headers() + .get_all(header::VARY) + .into_iter() + .filter_map(|val| { + val.to_str().ok().map(|v| { + v.to_string() + .split(", ") + .map(|s| s.to_string()) + .collect::>() + }) + }) + .flatten() + .collect::>(); + vary_headers.sort(); + let vary_headers = vary_headers.join(", "); + + let hashed_headers = if vary_headers.is_empty() { + Arc::default() + } else { + Arc::new(hash_vary_headers(&cache_attributes.headers)) + }; + counter.lock().record( + cache_attributes.hashed_query.clone(), + cache_attributes.subgraph_name.clone(), + hashed_headers, + cache_attributes.representations, + ); + } + fn store_subgraph_request_attributes( attribute_forward_config: Arc>, sub_request: &Request, @@ -1052,12 +1148,15 @@ impl Telemetry { .insert(SubgraphMetricsAttributes(attributes)); //.unwrap(); } + #[allow(clippy::too_many_arguments)] fn store_subgraph_response_attributes( context: &Context, metrics: BasicMetrics, subgraph_attribute: KeyValue, attribute_forward_config: Arc>, now: Instant, + counter: Option>>, + cache_attributes: Option, result: &Result, ) { let mut metric_attrs = { @@ -1088,6 +1187,21 @@ impl Telemetry { match &result { Ok(response) => { + if let Some(cache_attributes) = cache_attributes { + if let Ok(cache_control) = response + .response + .headers() + .get(header::CACHE_CONTROL) + .ok_or(()) + .and_then(|val| val.to_str().map(|v| v.to_string()).map_err(|_| ())) + { + metric_attrs.push(KeyValue::new("cache_control", cache_control)); + } + + if let Some(counter) = counter { + Self::update_cache_metrics(counter, response, cache_attributes) + } + } metric_attrs.push(KeyValue::new( "status", response.response.status().as_u16().to_string(), @@ -1554,6 +1668,113 @@ impl Telemetry { } } +#[derive(Debug, Clone)] +struct CacheAttributes { + subgraph_name: Arc, + headers: http::HeaderMap, + 
hashed_query: Arc, + // Typename + hashed_representation + representations: Vec<(Arc, Value)>, +} + +#[derive(Debug, Hash, Clone)] +struct CacheKey { + representation: Value, + typename: Arc, + query: Arc, + subgraph_name: Arc, + hashed_headers: Arc, +} + +// Get typename and hashed representation for each representations in the subgraph query +fn extract_cache_attributes( + representations: &[Value], +) -> Result, Value)>, BoxError> { + let mut res = Vec::new(); + for representation in representations { + let opt_type = representation + .as_object() + .and_then(|o| o.get(TYPENAME)) + .ok_or("missing __typename in representation")?; + let typename = opt_type.as_str().unwrap_or(""); + + res.push((Arc::new(typename.to_string()), representation.clone())); + } + Ok(res) +} + +struct CacheCounter { + primary: Bloom, + secondary: Bloom, + created_at: Instant, + ttl: Duration, +} + +impl CacheCounter { + fn new(ttl: Duration) -> Self { + Self { + primary: Self::make_filter(), + secondary: Self::make_filter(), + created_at: Instant::now(), + ttl, + } + } + + fn make_filter() -> Bloom { + // the filter is around 4kB in size (can be calculated with `Bloom::compute_bitmap_size`) + Bloom::new_for_fp_rate(10000, 0.2) + } + + fn record( + &mut self, + query: Arc, + subgraph_name: Arc, + hashed_headers: Arc, + representations: Vec<(Arc, Value)>, + ) { + if self.created_at.elapsed() >= self.ttl { + self.clear(); + } + + // typename -> (nb of cache hits, nb of entities) + let mut seen: HashMap, (usize, usize)> = HashMap::new(); + for (typename, representation) in representations { + let cache_hit = self.check(&CacheKey { + representation, + typename: typename.clone(), + query: query.clone(), + subgraph_name: subgraph_name.clone(), + hashed_headers: hashed_headers.clone(), + }); + + let seen_entry = seen.entry(typename.clone()).or_default(); + if cache_hit { + seen_entry.0 += 1; + } + seen_entry.1 += 1; + } + + for (typename, (cache_hit, total_entities)) in seen.into_iter() { + ::tracing::info!( + histogram.apollo.router.operations.entity.cache_hit = (cache_hit as f64 / total_entities as f64) * 100f64, + entity_type = %typename, + subgraph = %subgraph_name, + ); + } + } + + fn check(&mut self, key: &CacheKey) -> bool { + self.primary.check_and_set(key) || self.secondary.check(key) + } + + fn clear(&mut self) { + let secondary = std::mem::replace(&mut self.primary, Self::make_filter()); + self.secondary = secondary; + + self.created_at = Instant::now(); + } +} + fn filter_headers(headers: &HeaderMap, forward_rules: &ForwardHeaders) -> String { let headers_map = headers .iter() diff --git a/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs b/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs index 9489189891..25f260977c 100644 --- a/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs +++ b/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs @@ -847,7 +847,7 @@ mod test { use opentelemetry::Value; use prost::Message; use serde_json::json; - use crate::plugins::telemetry::apollo::{ErrorConfiguration}; + use crate::plugins::telemetry::apollo::ErrorConfiguration; use crate::plugins::telemetry::apollo_exporter::proto::reports::Trace; use crate::plugins::telemetry::apollo_exporter::proto::reports::trace::query_plan_node::{DeferNodePrimary, DeferredNode, ResponsePathElement}; use crate::plugins::telemetry::apollo_exporter::proto::reports::trace::{QueryPlanNode, Node, Error}; diff --git a/apollo-router/src/plugins/traffic_shaping/cache.rs 
b/apollo-router/src/plugins/traffic_shaping/cache.rs index f52ac67061..abb1e7031c 100644 --- a/apollo-router/src/plugins/traffic_shaping/cache.rs +++ b/apollo-router/src/plugins/traffic_shaping/cache.rs @@ -5,8 +5,10 @@ use std::time::Duration; use futures::future::BoxFuture; use futures::FutureExt; +use http::header; use serde::Deserialize; use serde::Serialize; +use serde_json_bytes::ByteString; use serde_json_bytes::Value; use sha2::Digest; use sha2::Sha256; @@ -25,6 +27,9 @@ use crate::json_ext::Object; use crate::services::subgraph; use crate::spec::TYPENAME; +const ENTITIES: &str = "_entities"; +pub(crate) const REPRESENTATIONS: &str = "representations"; + #[derive(Clone)] pub(crate) struct SubgraphCacheLayer { storage: RedisCacheStorage, @@ -83,14 +88,14 @@ where Poll::Ready(Ok(())) } - fn call(&mut self, mut request: subgraph::Request) -> Self::Future { + fn call(&mut self, request: subgraph::Request) -> Self::Future { let service = self.service.clone(); if !request .subgraph_request - .body_mut() + .body() .variables - .contains_key("representations") + .contains_key(REPRESENTATIONS) { return service.oneshot(request).boxed(); } @@ -118,9 +123,11 @@ where let body = request.subgraph_request.body_mut(); let query_hash = hash_request(body); + // TODO: compute TTL with cacheControl directive on the subgraph + let representations = body .variables - .get_mut("representations") + .get_mut(REPRESENTATIONS) .and_then(|value| value.as_array_mut()) .expect("we already checked that representations exist"); @@ -132,11 +139,11 @@ where .unwrap_or_else(|| std::iter::repeat(None).take(keys.len()).collect()); let (new_representations, mut result) = - filter_representations(representations, keys, cache_result)?; + filter_representations(&name, representations, keys, cache_result)?; if !new_representations.is_empty() { body.variables - .insert("representations", new_representations.into()); + .insert(REPRESENTATIONS, new_representations.into()); let mut response = service.oneshot(request).await?; @@ -145,7 +152,7 @@ where if let Some(mut entities) = data .as_mut() .and_then(|v| v.as_object_mut()) - .and_then(|o| o.remove("_entities")) + .and_then(|o| o.remove(ENTITIES)) { let new_entities = insert_entities_in_result( entities @@ -160,7 +167,7 @@ where data.as_mut() .and_then(|v| v.as_object_mut()) - .map(|o| o.insert("_entities", new_entities.into())); + .map(|o| o.insert(ENTITIES, new_entities.into())); response.response.body_mut().data = data; } @@ -168,7 +175,7 @@ where } else { let entities = insert_entities_in_result(&mut Vec::new(), &cache, &mut result).await?; let mut data = Object::default(); - data.insert("_entities", entities.into()); + data.insert(ENTITIES, entities.into()); Ok(subgraph::Response::builder() .data(data) @@ -178,14 +185,42 @@ where } } -fn hash_request(body: &graphql::Request) -> String { +pub(crate) fn hash_vary_headers(headers: &http::HeaderMap) -> String { + let mut digest = Sha256::new(); + + for vary_header_value in headers.get_all(header::VARY).into_iter() { + if vary_header_value == "*" { + return String::from("*"); + } else { + let header_names = match vary_header_value.to_str() { + Ok(header_val) => header_val.split(", "), + Err(_) => continue, + }; + header_names.for_each(|header_name| { + if let Some(header_value) = headers.get(header_name).and_then(|h| h.to_str().ok()) { + digest.update(header_value); + digest.update(&[0u8; 1][..]); + } + }); + } + } + + hex::encode(digest.finalize().as_slice()) +} + +pub(crate) fn hash_request(body: &mut graphql::Request) -> 
String { let mut digest = Sha256::new(); digest.update(body.query.as_deref().unwrap_or("-").as_bytes()); digest.update(&[0u8; 1][..]); digest.update(body.operation_name.as_deref().unwrap_or("-").as_bytes()); digest.update(&[0u8; 1][..]); + let repr_key = ByteString::from(REPRESENTATIONS); + // Removing the representations variable because it's already part of the cache key + let representations = body.variables.remove(&repr_key); digest.update(&serde_json::to_vec(&body.variables).unwrap()); - + if let Some(representations) = representations { + body.variables.insert(repr_key, representations); + } hex::encode(digest.finalize().as_slice()) } @@ -204,19 +239,21 @@ fn extract_cache_keys( reason: "missing __typename in representation".to_string(), })?; - let typename = opt_type.as_str().unwrap_or("-").to_string(); + let typename = opt_type.as_str().unwrap_or("-"); + + // We have to have representation because it can contains PII + let mut digest = Sha256::new(); + digest.update(serde_json::to_string(&representation).unwrap().as_bytes()); + let hashed_repr = hex::encode(digest.finalize().as_slice()); let key = format!( "subgraph.{}|{}|{}|{}", - subgraph_name, - &typename, - serde_json::to_string(&representation).unwrap(), - query_hash + subgraph_name, &typename, hashed_repr, query_hash ); representation .as_object_mut() - .map(|o| o.insert("__typename", opt_type)); + .map(|o| o.insert(TYPENAME, opt_type)); res.push(key); } Ok(res) @@ -230,6 +267,7 @@ struct IntermediateResult { // build a new list of representations without the ones we got from the cache fn filter_representations( + subgraph_name: &str, representations: &mut Vec, keys: Vec, mut cache_result: Vec>, @@ -245,7 +283,7 @@ fn filter_representations( { let opt_type = representation .as_object_mut() - .and_then(|o| o.remove("__typename")) + .and_then(|o| o.remove(TYPENAME)) .ok_or_else(|| FetchError::MalformedRequest { reason: "missing __typename in representation".to_string(), })?; @@ -257,7 +295,7 @@ fn filter_representations( representation .as_object_mut() - .map(|o| o.insert("__typename", opt_type)); + .map(|o| o.insert(TYPENAME, opt_type)); new_representations.push(representation); } else { cache_hit.entry(typename.clone()).or_default().0 += 1; @@ -270,11 +308,17 @@ fn filter_representations( } for (ty, (hit, miss)) in cache_hit { - tracing::event!( - Level::INFO, + tracing::info!( + monotonic_counter.apollo.router.operations.entity.cache = hit as u64, + entity_type = ty.as_str(), + hit = %true, + %subgraph_name + ); + tracing::info!( + monotonic_counter.apollo.router.operations.entity.cache = miss as u64, entity_type = ty.as_str(), - cache_hit = hit, - cache_miss = miss + miss = %true, + %subgraph_name ); } @@ -317,6 +361,7 @@ async fn insert_entities_in_result( } if !to_insert.is_empty() { + // TODO use insert_multiple_with_ttl cache.insert_multiple(&to_insert).await; } diff --git a/apollo-router/src/plugins/traffic_shaping/mod.rs b/apollo-router/src/plugins/traffic_shaping/mod.rs index 68b0a0d286..a85bdce659 100644 --- a/apollo-router/src/plugins/traffic_shaping/mod.rs +++ b/apollo-router/src/plugins/traffic_shaping/mod.rs @@ -6,7 +6,7 @@ //! * Compression //! * Rate limiting //! 
-mod cache; +pub(crate) mod cache; mod deduplication; pub(crate) mod rate; mod retry; @@ -379,10 +379,9 @@ impl TrafficShaping { let all_config = self.config.all.as_ref(); let subgraph_config = self.config.subgraphs.get(name); let final_config = Self::merge_config(all_config, subgraph_config); - let entity_caching = if let (Some(storage), Some(caching_config)) = ( self.storage.clone(), - subgraph_config + final_config .as_ref() .and_then(|c| c.experimental_entity_caching.as_ref()), ) { From 99ba9bd69fc817c48bc6ef6864c1ea6bf86a4545 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Wed, 6 Sep 2023 14:23:56 +0200 Subject: [PATCH 66/81] remove a cloned entity from response_at_path (#3759) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit there was always one additional clone before, even when the entity was used in only one place. In local benchmarks, I could see this remove 200μs from a query --- apollo-router/src/query_planner/fetch.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/apollo-router/src/query_planner/fetch.rs b/apollo-router/src/query_planner/fetch.rs index 41e2b233e3..c7b4e7216f 100644 --- a/apollo-router/src/query_planner/fetch.rs +++ b/apollo-router/src/query_planner/fetch.rs @@ -367,11 +367,19 @@ impl FetchNode { if let Value::Array(array) = entities { let mut value = Value::default(); - for (path, entity_idx) in paths { - if let Some(entity) = array.get(entity_idx) { - let mut data = entity.clone(); - rewrites::apply_rewrites(schema, &mut data, &self.output_rewrites); - let _ = value.insert(&path, data); + for (index, mut entity) in array.into_iter().enumerate() { + rewrites::apply_rewrites(schema, &mut entity, &self.output_rewrites); + + if let Some(paths) = inverted_paths.get(&index) { + if paths.len() > 1 { + for path in &paths[1..] { + let _ = value.insert(path, entity.clone()); + } + } + + if let Some(path) = paths.first() { + let _ = value.insert(path, entity); + } } } return (value, errors); From 03dde952cd87507abb6d7a8d365b41106cc1ed55 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Wed, 6 Sep 2023 15:09:04 +0200 Subject: [PATCH 67/81] reference a github discussion about GraphOS Authorization (#3755) --- apollo-router/feature_discussions.json | 4 ++-- .../tests/snapshots/lifecycle_tests__cli_config_preview.snap | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/apollo-router/feature_discussions.json b/apollo-router/feature_discussions.json index 775e1b84b8..a972a3a506 100644 --- a/apollo-router/feature_discussions.json +++ b/apollo-router/feature_discussions.json @@ -6,6 +6,6 @@ "experimental_http_max_request_bytes": "https://github.com/apollographql/router/discussions/3220" }, "preview": { - "preview_directives": "https://github.com/apollographql/router/discussions/???" + "preview_directives": "https://github.com/apollographql/router/discussions/3754" } -} \ No newline at end of file +} diff --git a/apollo-router/tests/snapshots/lifecycle_tests__cli_config_preview.snap b/apollo-router/tests/snapshots/lifecycle_tests__cli_config_preview.snap index 7aea987112..9cd540f304 100644 --- a/apollo-router/tests/snapshots/lifecycle_tests__cli_config_preview.snap +++ b/apollo-router/tests/snapshots/lifecycle_tests__cli_config_preview.snap @@ -9,5 +9,5 @@ stderr: stdout: List of all preview configurations with related GitHub discussions: - - preview_directives: https://github.com/apollographql/router/discussions/??? 
+ - preview_directives: https://github.com/apollographql/router/discussions/3754 From a7aa87e9eea91ff895329e643915c7df61b77439 Mon Sep 17 00:00:00 2001 From: Simon Sapin Date: Wed, 6 Sep 2023 15:36:45 +0200 Subject: [PATCH 68/81] Replace atty crate with std (#3729) The crate is unmaintained, and the standard library has equivalent functionality since Rust 1.70.0 * https://github.com/apollographql/router/security/dependabot/68 * https://doc.rust-lang.org/stable/std/io/trait.IsTerminal.html Co-authored-by: Geoffroy Couprie --- .changesets/maint_simon_is_terminal.md | 8 ++++++++ Cargo.lock | 1 - apollo-router/Cargo.toml | 1 - apollo-router/src/error.rs | 3 ++- apollo-router/src/plugins/telemetry/config.rs | 3 ++- apollo-router/src/plugins/telemetry/reload.rs | 3 ++- 6 files changed, 14 insertions(+), 5 deletions(-) create mode 100644 .changesets/maint_simon_is_terminal.md diff --git a/.changesets/maint_simon_is_terminal.md b/.changesets/maint_simon_is_terminal.md new file mode 100644 index 0000000000..7295cd62d3 --- /dev/null +++ b/.changesets/maint_simon_is_terminal.md @@ -0,0 +1,8 @@ +### Replace atty crate with std ([PR #3729](https://github.com/apollographql/router/pull/3729)) + +The crate is unmaintained, and the standard library has equivalent functionality since Rust 1.70.0 + +* https://github.com/apollographql/router/security/dependabot/68 +* https://doc.rust-lang.org/stable/std/io/trait.IsTerminal.html + +By [@SimonSapin](https://github.com/SimonSapin) in https://github.com/apollographql/router/pull/3729 diff --git a/Cargo.lock b/Cargo.lock index 212d0f17d8..2d3686a8f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -274,7 +274,6 @@ dependencies = [ "askama", "async-compression", "async-trait", - "atty", "aws-config", "aws-credential-types", "aws-sigv4", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index b9abf26a6b..1e33618bb9 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -64,7 +64,6 @@ async-compression = { version = "0.4.1", features = [ "deflate", ] } async-trait = "0.1.73" -atty = "0.2.14" axum = { version = "0.6.20", features = ["headers", "json", "original-uri"] } base64 = "0.21.2" bloomfilter = "1.0.12" diff --git a/apollo-router/src/error.rs b/apollo-router/src/error.rs index fe844c04ee..2902d50b10 100644 --- a/apollo-router/src/error.rs +++ b/apollo-router/src/error.rs @@ -1,4 +1,5 @@ //! Router errors. +use std::io::IsTerminal; use std::sync::Arc; use displaydoc::Display; @@ -553,7 +554,7 @@ impl ValidationErrors { pub(crate) fn print(&self) { if LevelFilter::current() == LevelFilter::OFF && cfg!(not(debug_assertions)) { return; - } else if atty::is(atty::Stream::Stdout) { + } else if std::io::stdout().is_terminal() { // Fancy reports for TTYs self.errors.iter().for_each(|err| { // `format!` works around https://github.com/rust-lang/rust/issues/107118 diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs index c0cff8118f..7d9418dee6 100644 --- a/apollo-router/src/plugins/telemetry/config.rs +++ b/apollo-router/src/plugins/telemetry/config.rs @@ -1,6 +1,7 @@ //! Configuration for the telemetry plugin. 
use std::collections::BTreeMap; use std::env; +use std::io::IsTerminal; use axum::headers::HeaderName; use opentelemetry::sdk::resource::EnvResourceDetector; @@ -300,7 +301,7 @@ pub(crate) enum LoggingFormat { impl Default for LoggingFormat { fn default() -> Self { - if atty::is(atty::Stream::Stdout) { + if std::io::stdout().is_terminal() { Self::Pretty } else { Self::Json diff --git a/apollo-router/src/plugins/telemetry/reload.rs b/apollo-router/src/plugins/telemetry/reload.rs index 064763405d..96296341e5 100644 --- a/apollo-router/src/plugins/telemetry/reload.rs +++ b/apollo-router/src/plugins/telemetry/reload.rs @@ -1,3 +1,4 @@ +use std::io::IsTerminal; use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; @@ -81,7 +82,7 @@ pub(crate) fn init_telemetry(log_level: &str) -> Result<()> { .with_filter(SamplingFilter::new()); // We choose json or plain based on tty - let fmt = if atty::is(atty::Stream::Stdout) { + let fmt = if std::io::stdout().is_terminal() { tracing_subscriber::fmt::Layer::new() .event_format(FilteringFormatter::new( TextFormatter::new() From 49d33177c603ce8aa127de9c0a4e619592b3545c Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Thu, 7 Sep 2023 08:00:06 +0100 Subject: [PATCH 69/81] Add apollo.router.id to otlp metrics metadata (#3764) Currently apollo metrics may have the same metadata between multiple routers. This means that we can't disambiguate when multiple routers are sending data. Add a random UUID to the metadata. This has no user facing impact. Note: there is no unit test or integration test for this. We need to take a broader look at how we do telemetry testing, and the solution should probably involve otel collector. However that is beyond the scope of this ticket. **Checklist** Complete the checklist (and note appropriate exceptions) before a final PR is raised. - [ ] Changes are compatible[^1] - [ ] Documentation[^2] completed - [ ] Performance impact assessed and acceptable - Tests added and passing[^3] - [ ] Unit Tests - [ ] Integration Tests - [ ] Manual Tests **Exceptions** *Note any exceptions here* **Notes** [^1]. It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this. [^2]. Configuration is an important part of many changes. Where applicable please try to document configuration examples. [^3]. Tick whichever testing boxes are applicable. If you are adding Manual Tests: - please document the manual testing (extensively) in the Exceptions. - please raise a separate issue to automate the test and label it (or ask for it to be labeled) as `manual test` --------- Co-authored-by: bryn Co-authored-by: Brandt Kinzle <114622981+bkinzle@users.noreply.github.com> --- apollo-router/src/plugins/telemetry/metrics/apollo.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/apollo-router/src/plugins/telemetry/metrics/apollo.rs b/apollo-router/src/plugins/telemetry/metrics/apollo.rs index ef10ca8006..6424705c24 100644 --- a/apollo-router/src/plugins/telemetry/metrics/apollo.rs +++ b/apollo-router/src/plugins/telemetry/metrics/apollo.rs @@ -1,6 +1,7 @@ //! 
Apollo metrics use std::sync::atomic::AtomicBool; use std::sync::atomic::Ordering; +use std::sync::OnceLock; use std::time::Duration; use opentelemetry::sdk::export::metrics::aggregation; @@ -12,6 +13,7 @@ use sys_info::hostname; use tonic::metadata::MetadataMap; use tower::BoxError; use url::Url; +use uuid::Uuid; use crate::plugins::telemetry::apollo::Config; use crate::plugins::telemetry::apollo_exporter::get_uname; @@ -31,6 +33,9 @@ fn default_buckets() -> Vec { ] } +// Random unique UUID for the Router. This doesn't actually identify the router, it just allows disambiguation between multiple routers with the same metadata. +static ROUTER_ID: OnceLock = OnceLock::new(); + impl MetricsConfigurator for Config { fn apply( &self, @@ -105,6 +110,10 @@ impl Config { opentelemetry::runtime::Tokio, ) .with_resource(Resource::new([ + KeyValue::new( + "apollo.router.id", + ROUTER_ID.get_or_init(Uuid::new_v4).to_string(), + ), KeyValue::new("apollo.graph.ref", reference.to_string()), KeyValue::new("apollo.schema.id", schema_id.to_string()), KeyValue::new( From 44cfc9678867919ed7ddfefb1a463feeb4d5c56f Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Thu, 7 Sep 2023 11:00:55 +0200 Subject: [PATCH 70/81] fix(subscription): force the deduplication to be enabled by default as it's documented (#3773) `subscription.enable_deduplication` was documented to be `true` by default but it wasn't really the case. --------- Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> Co-authored-by: Jeremy Lempereur --- ...njjj_subscription_default_configuration.md | 8 +++++ ...nfiguration__tests__schema_generation.snap | 6 ++-- apollo-router/src/plugins/subscription.rs | 29 +++++++++++++++---- 3 files changed, 33 insertions(+), 10 deletions(-) create mode 100644 .changesets/fix_bnjjj_subscription_default_configuration.md diff --git a/.changesets/fix_bnjjj_subscription_default_configuration.md b/.changesets/fix_bnjjj_subscription_default_configuration.md new file mode 100644 index 0000000000..2c4a5c207a --- /dev/null +++ b/.changesets/fix_bnjjj_subscription_default_configuration.md @@ -0,0 +1,8 @@ +### fix(subscription): force the deduplication to be enabled by default as it's documented ([PR #3773](https://github.com/apollographql/router/pull/3773)) + +A bug was introduced in router v1.25.0 which caused [subscription deduplication](https://www.apollographql.com/docs/router/executing-operations/subscription-support#subscription-deduplication) to be disabled by default. +As documented, the router will enable deduplication by default, providing you with subscriptions that scale. + +Should you decide to disable it, you can still explicitly set `enable_deduplication` to `false`. 
+ +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/3773 diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index ff9123ab31..14ab198d54 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -1641,17 +1641,15 @@ expression: "&schema" "subscription": { "description": "Subscriptions configuration", "type": "object", - "required": [ - "enabled" - ], "properties": { "enable_deduplication": { "description": "Enable the deduplication of subscription (for example if we detect the exact same request to subgraph we won't open a new websocket to the subgraph in passthrough mode) (default: true)", - "default": false, + "default": true, "type": "boolean" }, "enabled": { "description": "Enable subscription", + "default": true, "type": "boolean" }, "max_opened_subscriptions": { diff --git a/apollo-router/src/plugins/subscription.rs b/apollo-router/src/plugins/subscription.rs index 850f584b60..afd7e5a6d1 100644 --- a/apollo-router/src/plugins/subscription.rs +++ b/apollo-router/src/plugins/subscription.rs @@ -56,31 +56,32 @@ pub(crate) struct Subscription { /// Subscriptions configuration #[derive(Debug, Clone, Deserialize, Serialize, JsonSchema)] -#[serde(deny_unknown_fields)] +#[serde(deny_unknown_fields, default)] pub(crate) struct SubscriptionConfig { /// Enable subscription pub(crate) enabled: bool, /// Select a subscription mode (callback or passthrough) - #[serde(default)] pub(crate) mode: SubscriptionModeConfig, /// Enable the deduplication of subscription (for example if we detect the exact same request to subgraph we won't open a new websocket to the subgraph in passthrough mode) /// (default: true) - #[serde(default)] + #[serde(default = "enable_deduplication_default")] pub(crate) enable_deduplication: bool, /// This is a limit to only have maximum X opened subscriptions at the same time. By default if it's not set there is no limit. 
- #[serde(default)] pub(crate) max_opened_subscriptions: Option, /// It represent the capacity of the in memory queue to know how many events we can keep in a buffer - #[serde(default)] pub(crate) queue_capacity: Option, } +fn enable_deduplication_default() -> bool { + true +} + impl Default for SubscriptionConfig { fn default() -> Self { Self { enabled: true, mode: Default::default(), - enable_deduplication: true, + enable_deduplication: enable_deduplication_default(), max_opened_subscriptions: None, queue_capacity: None, } @@ -1268,6 +1269,22 @@ mod tests { let subgraph_cfg = config_without_mode.mode.get_subgraph_config("test"); assert_eq!(subgraph_cfg, None); + + let sub_config: SubscriptionConfig = serde_json::from_value(serde_json::json!({ + "mode": { + "preview_callback": { + "public_url": "http://localhost:4000", + "path": "/subscription/callback", + "subgraphs": ["test"] + } + } + })) + .unwrap(); + + assert!(sub_config.enabled); + assert!(sub_config.enable_deduplication); + assert!(sub_config.max_opened_subscriptions.is_none()); + assert!(sub_config.queue_capacity.is_none()); } } From 2915737aaa1086324ed108b3bd221f44586cd1b6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 7 Sep 2023 09:02:37 +0000 Subject: [PATCH 71/81] fix(deps): update rust crate bytes to 1.5.0 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2d3686a8f1..f203e3022b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1201,9 +1201,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be" +checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" [[package]] name = "bytes-utils" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 1e33618bb9..50c7235249 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -68,7 +68,7 @@ axum = { version = "0.6.20", features = ["headers", "json", "original-uri"] } base64 = "0.21.2" bloomfilter = "1.0.12" buildstructor = "0.5.3" -bytes = "1.4.0" +bytes = "1.5.0" clap = { version = "4.4.2", default-features = false, features = [ "env", "derive", From 81fc80362fcd983f34f22aeca82ec4a7050fb24b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e?= Date: Fri, 8 Sep 2023 15:03:07 +0200 Subject: [PATCH 72/81] Add logs and extend metrics for `graphql_validation_mode: both` (#3674) This adds logging for query validation errors with either Rust or JS when there is a mismatch, i.e. one of them validates but the other does not. In other cases we are not really interested in the specific error (it will just go back to the user), so we don't need to log there. To log the Rust validation error well, I now store the ApolloDiagnostics that were produced on `Query{}`. `Query` is serializable for caching, but ApolloDiagnostic is not. Here I just skipped serializing `ApolloDiagnostic` so if `Query` is loaded from cache, it does not have the validation error stored. I'm not sure this is the right thing to do. The ApolloDiagnostics are later used after query planning (which may produce a JS validation error). So it's correct if we can ~safely assume that we only have valid Query instances cached. Otherwise we might get spurious error logs from this. 
- [ ] So is that a safe assumption? Reading the CachingQueryPlanner implementation I think it does only store errors (then it's not a `Query` instance) and fully successful planning (then it has run both Rust and JS validation already). So it looks fine, but it could be a bit brittle to rely on this. I also simplified the validation error printing which - [x] depends on https://github.com/apollographql/apollo-rs/pull/630. - [x] and on https://github.com/apollographql/router/pull/3675 **Checklist** Complete the checklist (and note appropriate exceptions) before a final PR is raised. - [ ] Changes are compatible[^1] - [ ] Documentation[^2] completed - [ ] Performance impact assessed and acceptable - Tests added and passing[^3] - [ ] Unit Tests - [ ] Integration Tests - [ ] Manual Tests **Exceptions** *Note any exceptions here* **Notes** [^1]. It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this. [^2]. Configuration is an important part of many changes. Where applicable please try to document configuration examples. [^3]. Tick whichever testing boxes are applicable. If you are adding Manual Tests: - please document the manual testing (extensively) in the Exceptions. - please raise a separate issue to automate the test and label it (or ask for it to be labeled) as `manual test` --- apollo-router/src/error.rs | 55 +++++-------- .../src/query_planner/bridge_query_planner.rs | 80 +++++++++++++------ apollo-router/src/spec/query.rs | 15 ++-- apollo-router/src/spec/schema.rs | 1 - 4 files changed, 86 insertions(+), 65 deletions(-) diff --git a/apollo-router/src/error.rs b/apollo-router/src/error.rs index 2902d50b10..3ca86dc198 100644 --- a/apollo-router/src/error.rs +++ b/apollo-router/src/error.rs @@ -1,5 +1,4 @@ //! Router errors. -use std::io::IsTerminal; use std::sync::Arc; use displaydoc::Display; @@ -11,7 +10,6 @@ use serde::Deserialize; use serde::Serialize; use thiserror::Error; use tokio::task::JoinError; -use tracing::level_filters::LevelFilter; pub(crate) use crate::configuration::ConfigurationError; pub(crate) use crate::graphql::Error; @@ -438,6 +436,14 @@ impl From for QueryPlannerError { } } +impl From for QueryPlannerError { + fn from(err: ValidationErrors) -> Self { + // This needs to be serializable, so eagerly stringify the non-serializable + // ApolloDiagnostics. + QueryPlannerError::SpecError(SpecError::ValidationError(err.to_string())) + } +} + impl From for QueryPlannerError { fn from(error: router_bridge::error::Error) -> Self { QueryPlannerError::RouterBridgeError(error) @@ -503,9 +509,9 @@ pub(crate) enum SchemaError { UrlParse(String, http::uri::InvalidUri), /// Could not find an URL for subgraph {0} MissingSubgraphUrl(String), - /// GraphQL parser error(s). + /// GraphQL parser error: {0} Parse(ParseErrors), - /// GraphQL parser or validation error(s). 
+ /// GraphQL validation error: {0} Validate(ValidationErrors), /// Api error(s): {0} Api(String), @@ -520,11 +526,16 @@ pub(crate) struct ParseErrors { impl std::fmt::Display for ParseErrors { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut errors = self.errors.iter(); - if let Some(error) = errors.next() { - write!(f, "{}", error.message())?; + for (i, error) in errors.by_ref().take(5).enumerate() { + if i > 0 { + f.write_str("\n")?; + } + // TODO(@goto-bus-stop): display line/column once that is exposed from apollo-rs + write!(f, "at index {}: {}", error.index(), error.message())?; } - for error in errors { - write!(f, "\n{}", error.message())?; + let remaining = errors.count(); + if remaining > 0 { + write!(f, "\n...and {remaining} other errors")?; } Ok(()) } @@ -540,39 +551,15 @@ impl std::fmt::Display for ValidationErrors { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut errors = self.errors.iter(); if let Some(error) = errors.next() { - write!(f, "{}", error.data)?; + write!(f, "at index {}: {}", error.location.offset(), error.data)?; } for error in errors { - write!(f, "\n{}", error.data)?; + write!(f, "\nat index {}: {}", error.location.offset(), error.data)?; } Ok(()) } } -impl ValidationErrors { - #[allow(clippy::needless_return)] - pub(crate) fn print(&self) { - if LevelFilter::current() == LevelFilter::OFF && cfg!(not(debug_assertions)) { - return; - } else if std::io::stdout().is_terminal() { - // Fancy reports for TTYs - self.errors.iter().for_each(|err| { - // `format!` works around https://github.com/rust-lang/rust/issues/107118 - // to test the panic from https://github.com/apollographql/router/issues/2269 - #[allow(clippy::format_in_format_args)] - { - println!("{}", format!("{err}")); - } - }); - } else { - // Best effort to display errors - self.errors.iter().for_each(|diag| { - println!("{}", diag.data); - }); - }; - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/apollo-router/src/query_planner/bridge_query_planner.rs b/apollo-router/src/query_planner/bridge_query_planner.rs index 7805d127f9..4a677aaad1 100644 --- a/apollo-router/src/query_planner/bridge_query_planner.rs +++ b/apollo-router/src/query_planner/bridge_query_planner.rs @@ -275,6 +275,50 @@ impl BridgeQueryPlanner { operation: Option, selections: Query, ) -> Result { + /// Compare errors from graphql-js and apollo-rs validation, and produce metrics on + /// whether they had the same result. + /// + /// The result isn't inspected deeply: it only checks validation success/failure. 
+ fn compare_validation_errors( + js_validation_error: Option<&router_bridge::planner::PlanErrors>, + rs_validation_error: Option<&crate::error::ValidationErrors>, + ) { + let is_validation_error = js_validation_error + .map_or(false, |js| js.errors.iter().all(|err| err.validation_error)); + match (is_validation_error, rs_validation_error) { + (false, Some(validation_error)) => { + tracing::warn!( + monotonic_counter.apollo.router.validation = 1u64, + validation.source = VALIDATION_SOURCE_OPERATION, + validation.result = VALIDATION_FALSE_POSITIVE, + "validation mismatch: JS query planner did not report query validation error, but apollo-rs did" + ); + tracing::warn!( + "validation mismatch: Rust validation reported: {validation_error}" + ); + } + (true, None) => { + tracing::warn!( + monotonic_counter.apollo.router.validation = 1u64, + validation.source = VALIDATION_SOURCE_OPERATION, + validation.result = VALIDATION_FALSE_NEGATIVE, + "validation mismatch: apollo-rs did not report query validation error, but JS query planner did" + ); + tracing::warn!( + "validation mismatch: JS validation reported: {}", + // Unwrapping is safe because `is_validation_error` is true + js_validation_error.unwrap(), + ); + } + // if JS and Rust implementations agree, we return the JS result for now. + _ => tracing::info!( + monotonic_counter.apollo.router.validation = 1u64, + validation.source = VALIDATION_SOURCE_OPERATION, + validation.result = VALIDATION_MATCH, + ), + } + } + let planner_result = self .planner .plan(filtered_query.clone(), operation.clone()) @@ -282,35 +326,23 @@ impl BridgeQueryPlanner { .map_err(QueryPlannerError::RouterBridgeError)? .into_result() .map_err(|err| { - let is_validation_error = err.errors.iter().all(|err| err.validation_error); - match (is_validation_error, &selections.validation_error) { - (false, Some(_)) => { - tracing::warn!( - monotonic_counter.apollo.router.validation = 1u64, - validation.source = VALIDATION_SOURCE_OPERATION, - validation.result = VALIDATION_FALSE_POSITIVE, - "validation mismatch: JS query planner did not report query validation error, but apollo-rs did" - ); - } - (true, None) => { - tracing::warn!( - monotonic_counter.apollo.router.validation = 1u64, - validation.source = VALIDATION_SOURCE_OPERATION, - validation.result = VALIDATION_FALSE_NEGATIVE, - "validation mismatch: apollo-rs did not report query validation error, but JS query planner did" - ); - } - // if JS and Rust implementations agree, we return the JS result for now. 
- _ => tracing::info!( - monotonic_counter.apollo.router.validation = 1u64, - validation.source = VALIDATION_SOURCE_OPERATION, - validation.result = VALIDATION_MATCH, - ), + if matches!( + self.configuration.experimental_graphql_validation_mode, + GraphQLValidationMode::Both + ) { + compare_validation_errors(Some(&err), selections.validation_error.as_ref()); } QueryPlannerError::from(err) })?; + if matches!( + self.configuration.experimental_graphql_validation_mode, + GraphQLValidationMode::Both + ) { + compare_validation_errors(None, selections.validation_error.as_ref()); + } + // the `statsReportKey` field should match the original query instead of the filtered query, to index them all under the same query let operation_signature = if original_query != filtered_query { Some( diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs index 6f6b5e02d3..da5b66d4c3 100644 --- a/apollo-router/src/spec/query.rs +++ b/apollo-router/src/spec/query.rs @@ -69,9 +69,12 @@ pub(crate) struct Query { pub(crate) is_original: bool, /// Validation errors, used for comparison with the JS implementation. /// + /// `ValidationErrors` is not serde-serializable. If this comes from cache, + /// the plan ought also to be cached, so we should not need this value anyways. /// XXX(@goto-bus-stop): Remove when only Rust validation is used #[derivative(PartialEq = "ignore", Hash = "ignore")] - pub(crate) validation_error: Option, + #[serde(skip)] + pub(crate) validation_error: Option, } #[derive(Debug, Serialize, Deserialize)] @@ -318,7 +321,10 @@ impl Query { } /// Check for validation errors in a query in the compiler. - pub(crate) fn validate_query(compiler: &ApolloCompiler, id: FileId) -> Result<(), SpecError> { + pub(crate) fn validate_query( + compiler: &ApolloCompiler, + id: FileId, + ) -> Result<(), ValidationErrors> { // Bail out on validation errors, only if the input is expected to be valid let diagnostics = compiler.db.validate_executable(id); let errors = diagnostics @@ -330,10 +336,7 @@ impl Query { return Ok(()); } - let errors = ValidationErrors { errors }; - errors.print(); - - Err(SpecError::ValidationError(errors.to_string())) + Err(ValidationErrors { errors }) } /// Extract serializable data structures from the apollo-compiler HIR. 
diff --git a/apollo-router/src/spec/schema.rs b/apollo-router/src/spec/schema.rs index 892883339b..32e955b786 100644 --- a/apollo-router/src/spec/schema.rs +++ b/apollo-router/src/spec/schema.rs @@ -91,7 +91,6 @@ impl Schema { let errors = ValidationErrors { errors: diagnostics.clone(), }; - errors.print(); // Only error out if new validation is used: with `Both`, we take the legacy // validation as authoritative and only use the new result for comparison From 08c52db71440e450af2bc09ab8aff544ac501426 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 8 Sep 2023 19:08:33 +0000 Subject: [PATCH 73/81] fix(deps): update dependency dd-trace to v4.15.0 --- dockerfiles/tracing/datadog-subgraph/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/dockerfiles/tracing/datadog-subgraph/package-lock.json b/dockerfiles/tracing/datadog-subgraph/package-lock.json index da582feb1e..cfa64b06c1 100644 --- a/dockerfiles/tracing/datadog-subgraph/package-lock.json +++ b/dockerfiles/tracing/datadog-subgraph/package-lock.json @@ -868,9 +868,9 @@ "integrity": "sha512-/RC5F4l1SCqD/jazwUF6+t34Cd8zTSAGZ7rvvZu1whZUhD2a5MOGKjSGowoGcpj/cbVZk1ZODIooJEQQq3nNAA==" }, "node_modules/dd-trace": { - "version": "4.14.0", - "resolved": "https://registry.npmjs.org/dd-trace/-/dd-trace-4.14.0.tgz", - "integrity": "sha512-hxYWynK44VWz5FjQTG9QmQwpb9WkgBQ1QMfan9VfXt04N1H9slvFBteh3rKOUwZk61mHmqorWC0uYz4ipcQUig==", + "version": "4.15.0", + "resolved": "https://registry.npmjs.org/dd-trace/-/dd-trace-4.15.0.tgz", + "integrity": "sha512-LesEnL2X1qqvwSCCxkSm/qRkuFUpmamf/BloGcz3B72og2qdNel8W8uviwNO9b7OGC3Fm+QzdfqwfTkoIMUZwQ==", "hasInstallScript": true, "dependencies": { "@datadog/native-appsec": "^3.2.0", From f25aa0e80a8a3d6c0250bbaf69cdffdfb98d9dd7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 11 Sep 2023 06:52:49 +0000 Subject: [PATCH 74/81] chore(deps): update all non-major packages >= 1.0 --- Cargo.lock | 11 ++++++----- apollo-router/Cargo.toml | 6 +++--- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f203e3022b..2ea7eb152b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5051,14 +5051,15 @@ dependencies = [ [[package]] name = "rhai" -version = "1.15.1" +version = "1.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c2a11a646ef5d4e4a9d5cf80c7e4ecb20f9b1954292d5c5e6d6cbc8d33728ec" +checksum = "637a4f79f65571b1fd1a0ebbae05bbbf58a01faf612abbc3eea15cda34f0b87a" dependencies = [ "ahash", - "bitflags 1.3.2", + "bitflags 2.4.0", "instant", "num-traits", + "once_cell", "rhai_codegen", "serde", "smallvec", @@ -5579,9 +5580,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.105" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693151e1ac27563d6dbcec9dee9fbd5da8539b20fa14ad3752b2e6d363ace360" +checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2" dependencies = [ "indexmap 2.0.0", "itoa", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 50c7235249..6e9eab0e03 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -164,7 +164,7 @@ prost-types = "0.11.9" proteus = "0.5.0" rand = "0.8.5" rand_core = "0.6.4" -rhai = { version = "1.15.1", features = ["sync", "serde", "internals"] } +rhai = { version = "1.16.1", features = ["sync", "serde", "internals"] } regex = "1.9.5" reqwest = { version = 
"0.11.19", default-features = false, features = [ "rustls-tls", @@ -182,7 +182,7 @@ shellexpand = "3.1.0" sha2 = "0.10.7" serde = { version = "1.0.188", features = ["derive", "rc"] } serde_json_bytes = { version = "0.2.1", features = ["preserve_order"] } -serde_json = { version = "1.0.105", features = [ +serde_json = { version = "1.0.106", features = [ "preserve_order", "float_roundtrip", ] } @@ -275,7 +275,7 @@ reqwest = { version = "0.11.19", default-features = false, features = [ "json", "stream", ] } -rhai = { version = "1.15.1", features = [ +rhai = { version = "1.16.1", features = [ "sync", "serde", "internals", From ba109e088cf3a255bdbd546af2cee689d002fcd1 Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Mon, 11 Sep 2023 09:39:42 +0100 Subject: [PATCH 75/81] Fix metrics typos (#3798) Fix typos: * timout->timeout * filtered->authorization.filtered * needs_authenticated->authorization.authenticated * needs_requires_scopes->authorization.requires_scopes **Checklist** Complete the checklist (and note appropriate exceptions) before a final PR is raised. - [ ] Changes are compatible[^1] - [ ] Documentation[^2] completed - [ ] Performance impact assessed and acceptable - Tests added and passing[^3] - [ ] Unit Tests - [ ] Integration Tests - [ ] Manual Tests **Exceptions** *Note any exceptions here* **Notes** [^1]. It may be appropriate to bring upcoming changes to the attention of other (impacted) groups. Please endeavour to do this before seeking PR approval. The mechanism for doing this will vary considerably, so use your judgement as to how and when to do this. [^2]. Configuration is an important part of many changes. Where applicable please try to document configuration examples. [^3]. Tick whichever testing boxes are applicable. If you are adding Manual Tests: - please document the manual testing (extensively) in the Exceptions. 
- please raise a separate issue to automate the test and label it (or ask for it to be labeled) as `manual test` Co-authored-by: bryn --- apollo-router/src/configuration/metrics.rs | 2 +- ...ration__metrics__test__metrics@entities.router.yaml.snap | 2 +- ..._metrics__test__metrics@traffic_shaping.router.yaml.snap | 2 +- apollo-router/src/plugins/authorization/mod.rs | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/apollo-router/src/configuration/metrics.rs b/apollo-router/src/configuration/metrics.rs index eb459b92e9..1bd5db048a 100644 --- a/apollo-router/src/configuration/metrics.rs +++ b/apollo-router/src/configuration/metrics.rs @@ -263,7 +263,7 @@ impl Metrics { log_usage_metrics!( value.apollo.router.config.traffic_shaping, "$.traffic_shaping", - opt.router.timout, + opt.router.timeout, "$$[?(@.router.timeout)]", opt.router.rate_limit, "$.router.global_rate_limit", diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap index e4fe10d957..dc5ccc2c68 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@entities.router.yaml.snap @@ -8,7 +8,7 @@ value.apollo.router.config.entities: value.apollo.router.config.traffic_shaping: - 1 - opt__router__rate_limit__: false - opt__router__timout__: false + opt__router__timeout__: false opt__subgraph__compression__: false opt__subgraph__deduplicate_query__: false opt__subgraph__http2__: false diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap index ab53cd0460..87a594c8fa 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__metrics__test__metrics@traffic_shaping.router.yaml.snap @@ -5,7 +5,7 @@ expression: "&metrics.metrics" value.apollo.router.config.traffic_shaping: - 1 - opt__router__rate_limit__: true - opt__router__timout__: true + opt__router__timeout__: true opt__subgraph__compression__: true opt__subgraph__deduplicate_query__: true opt__subgraph__http2__: true diff --git a/apollo-router/src/plugins/authorization/mod.rs b/apollo-router/src/plugins/authorization/mod.rs index 7bc42dd114..489f02a4c7 100644 --- a/apollo-router/src/plugins/authorization/mod.rs +++ b/apollo-router/src/plugins/authorization/mod.rs @@ -482,9 +482,9 @@ impl Plugin for AuthorizationPlugin { if needs_authenticated || needs_requires_scopes { tracing::info!( monotonic_counter.apollo.router.operations.authorization = 1u64, - filtered = filtered, - authenticated = needs_authenticated, - requires_scopes = needs_requires_scopes, + authorization.filtered = filtered, + authorization.needs_authenticated = needs_authenticated, + authorization.needs_requires_scopes = needs_requires_scopes, ); } From 9481e8eeb1646e257dc2435a77a397fd9630fee7 Mon Sep 17 00:00:00 2001 From: Jeremy Lempereur Date: Mon, 11 Sep 2023 14:13:22 +0200 Subject: [PATCH 76/81] Subgraph authentication: Make sure Request signing happens 
after Compression and APQ (#3735) Fix #3608 The router now adds SigningParams to the private context, which the subgraph service can use to sign http calls before the HTTP fetch is made (for websocket connection and regular http calls) --- .changesets/fix_igni_sigv4_http_level.md | 12 + .../src/plugins/authentication/mod.rs | 2 +- .../src/plugins/authentication/subgraph.rs | 233 ++++++++++++------ .../src/services/subgraph_service.rs | 60 ++++- docs/source/configuration/authn-subgraph.mdx | 6 + 5 files changed, 231 insertions(+), 82 deletions(-) create mode 100644 .changesets/fix_igni_sigv4_http_level.md diff --git a/.changesets/fix_igni_sigv4_http_level.md b/.changesets/fix_igni_sigv4_http_level.md new file mode 100644 index 0000000000..5679499772 --- /dev/null +++ b/.changesets/fix_igni_sigv4_http_level.md @@ -0,0 +1,12 @@ +### Subgraph authentication: Make sure Request signing happens after Compression and APQ ([Issue #3608](https://github.com/apollographql/router/issues/3608)) + +[Subgraph authentication](https://www.apollographql.com/docs/router/configuration/authn-subgraph) is available since router v1.27.0. + +Unfortunately this first version didn't work well with features that operate with the SubgraphService, for example: + - Subgraph APQ + - Subgraph HTTP compression + - Custom plugins that operate on the Subgraph level, written either via coprocessors, in rhai, or native. + +The router will now sign subgraph requests just before they are sent to subgraphs. + +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/3735 diff --git a/apollo-router/src/plugins/authentication/mod.rs b/apollo-router/src/plugins/authentication/mod.rs index b90c814b7a..2c8d81fefc 100644 --- a/apollo-router/src/plugins/authentication/mod.rs +++ b/apollo-router/src/plugins/authentication/mod.rs @@ -44,7 +44,7 @@ use crate::services::router; use crate::Context; mod jwks; -mod subgraph; +pub(crate) mod subgraph; #[cfg(test)] mod tests; diff --git a/apollo-router/src/plugins/authentication/subgraph.rs b/apollo-router/src/plugins/authentication/subgraph.rs index 7839d1628d..0ad2abb7f5 100644 --- a/apollo-router/src/plugins/authentication/subgraph.rs +++ b/apollo-router/src/plugins/authentication/subgraph.rs @@ -1,4 +1,3 @@ -use core::ops::ControlFlow; use std::collections::HashMap; use std::sync::Arc; use std::time::SystemTime; @@ -11,14 +10,16 @@ use aws_sigv4::http_request::PayloadChecksumKind; use aws_sigv4::http_request::SignableBody; use aws_sigv4::http_request::SignableRequest; use aws_sigv4::http_request::SigningSettings; +use aws_sigv4::signing_params; use aws_types::region::Region; +use http::Request; +use hyper::Body; use schemars::JsonSchema; use serde::Deserialize; use tower::BoxError; use tower::ServiceBuilder; use tower::ServiceExt; -use crate::layers::ServiceBuilderExt; use crate::services::SubgraphRequest; /// Hardcoded Config using access_key and secret. @@ -193,6 +194,114 @@ pub(crate) struct SigningParamsConfig { credentials_provider: Arc, region: Region, service_name: String, + subgraph_name: String, +} + +impl SigningParamsConfig { + pub(crate) async fn sign( + self, + mut req: Request, + subgraph_name: &str, + ) -> Result, BoxError> { + let credentials = self.credentials().await?; + let builder = self.signing_params_builder(&credentials).await?; + let (parts, body) = req.into_parts(); + // Depending on the servicve, AWS refuses sigv4 payloads that contain specific headers. 
+ // We'll go with default signed headers + let headers = Default::default(); + // UnsignedPayload only applies to lattice + let body_bytes = hyper::body::to_bytes(body).await?.to_vec(); + let signable_request = SignableRequest::new( + &parts.method, + &parts.uri, + &headers, + match self.service_name.as_str() { + "vpc-lattice-svcs" => SignableBody::UnsignedPayload, + _ => SignableBody::Bytes(body_bytes.as_slice()), + }, + ); + + let signing_params = builder.build().expect("all required fields set"); + + let (signing_instructions, _signature) = sign(signable_request, &signing_params) + .map_err(|err| { + increment_failure_counter(subgraph_name); + let error = format!("failed to sign GraphQL body for AWS SigV4: {}", err); + tracing::error!("{}", error); + error + })? + .into_parts(); + req = Request::::from_parts(parts, body_bytes.into()); + signing_instructions.apply_to_request(&mut req); + increment_success_counter(subgraph_name); + Ok(req) + } + // This function is the same as above, except it's a new one because () doesn't implement HttpBody` + pub(crate) async fn sign_empty( + self, + mut req: Request<()>, + subgraph_name: &str, + ) -> Result, BoxError> { + let credentials = self.credentials().await?; + let builder = self.signing_params_builder(&credentials).await?; + let (parts, _) = req.into_parts(); + // Depending on the servicve, AWS refuses sigv4 payloads that contain specific headers. + // We'll go with default signed headers + let headers = Default::default(); + // UnsignedPayload only applies to lattice + let signable_request = SignableRequest::new( + &parts.method, + &parts.uri, + &headers, + match self.service_name.as_str() { + "vpc-lattice-svcs" => SignableBody::UnsignedPayload, + _ => SignableBody::Bytes(&[]), + }, + ); + + let signing_params = builder.build().expect("all required fields set"); + + let (signing_instructions, _signature) = sign(signable_request, &signing_params) + .map_err(|err| { + increment_failure_counter(subgraph_name); + let error = format!("failed to sign GraphQL body for AWS SigV4: {}", err); + tracing::error!("{}", error); + error + })? 
+ .into_parts(); + req = Request::<()>::from_parts(parts, ()); + signing_instructions.apply_to_request(&mut req); + increment_success_counter(subgraph_name); + Ok(req) + } + + async fn signing_params_builder<'s>( + &'s self, + credentials: &'s Credentials, + ) -> Result, BoxError> { + let settings = get_signing_settings(self); + let mut builder = http_request::SigningParams::builder() + .access_key(credentials.access_key_id()) + .secret_key(credentials.secret_access_key()) + .region(self.region.as_ref()) + .service_name(&self.service_name) + .time(SystemTime::now()) + .settings(settings); + builder.set_security_token(credentials.session_token()); + Ok(builder) + } + + async fn credentials(&self) -> Result { + self.credentials_provider + .provide_credentials() + .await + .map_err(|err| { + increment_failure_counter(self.subgraph_name.as_str()); + let error = format!("failed to get credentials for AWS SigV4 signing: {}", err); + tracing::error!("{}", error); + error.into() + }) + } } fn increment_success_counter(subgraph_name: &str) { @@ -234,6 +343,7 @@ pub(super) async fn make_signing_params( region: config.region(), service_name: config.service_name(), credentials_provider, + subgraph_name: subgraph_name.to_string(), }) } } @@ -244,7 +354,7 @@ pub(super) async fn make_signing_params( fn get_signing_settings(signing_params: &SigningParamsConfig) -> SigningSettings { let mut settings = SigningSettings::default(); settings.payload_checksum_kind = match signing_params.service_name.as_str() { - "s3" | "vpc-lattice-svcs" => PayloadChecksumKind::XAmzSha256, + "appsync" | "s3" | "vpc-lattice-svcs" => PayloadChecksumKind::XAmzSha256, _ => PayloadChecksumKind::NoHeader, }; settings @@ -261,77 +371,12 @@ impl SubgraphAuth { service: crate::services::subgraph::BoxService, ) -> crate::services::subgraph::BoxService { if let Some(signing_params) = self.params_for_service(name) { - let name = name.to_string(); ServiceBuilder::new() - .checkpoint_async(move |mut req: SubgraphRequest| { + .map_request(move |req: SubgraphRequest| { let signing_params = signing_params.clone(); - let name = name.clone(); - async move { - let credentials = signing_params - .credentials_provider - .provide_credentials() - .await - .map_err(|err| { - increment_failure_counter(name.as_str()); - let error = format!( - "failed to get credentials for AWS SigV4 signing: {}", - err - ); - tracing::error!("{}", error); - error - })?; - - let settings = get_signing_settings(&signing_params); - let mut builder = http_request::SigningParams::builder() - .access_key(credentials.access_key_id()) - .secret_key(credentials.secret_access_key()) - .region(signing_params.region.as_ref()) - .service_name(&signing_params.service_name) - .time(SystemTime::now()) - .settings(settings); - builder.set_security_token(credentials.session_token()); - let body_bytes = - serde_json::to_vec(&req.subgraph_request.body()).map_err(|err| { - increment_failure_counter(name.as_str()); - let error = format!( - "failed to serialize GraphQL body for AWS SigV4 signing: {}", - err - ); - tracing::error!("{}", error); - error - })?; - - // UnsignedPayload only applies to lattice - let signable_request = SignableRequest::new( - req.subgraph_request.method(), - req.subgraph_request.uri(), - req.subgraph_request.headers(), - match signing_params.service_name.as_str() { - "vpc-lattice-svcs" => SignableBody::UnsignedPayload, - _ => SignableBody::Bytes(&body_bytes), - }, - ); - - let signing_params = builder.build().expect("all required fields set"); - - let 
(signing_instructions, _signature) = - sign(signable_request, &signing_params) - .map_err(|err| { - increment_failure_counter(name.as_str()); - let error = format!( - "failed to sign GraphQL body for AWS SigV4: {}", - err - ); - tracing::error!("{}", error); - error - })? - .into_parts(); - signing_instructions.apply_to_request(&mut req.subgraph_request); - increment_success_counter(name.as_str()); - Ok(ControlFlow::Continue(req)) - } + req.context.private_entries.lock().insert(signing_params); + req }) - .buffered() .service(service) .boxed() } else { @@ -396,6 +441,10 @@ mod test { .await .payload_checksum_kind ); + assert_eq!( + PayloadChecksumKind::XAmzSha256, + test_signing_settings("appsync").await.payload_checksum_kind + ); assert_eq!( PayloadChecksumKind::NoHeader, test_signing_settings("something-else") @@ -464,10 +513,10 @@ mod test { mock.expect_call() .times(1) .withf(|request| { + let http_request = get_signed_request(request, "products".to_string()); assert_eq!( "UNSIGNED-PAYLOAD", - request - .subgraph_request + http_request .headers() .get("x-amz-content-sha256") .unwrap() @@ -509,21 +558,22 @@ mod test { mock.expect_call() .times(1) .withf(|request| { - let authorization_regex = Regex::new(r"AWS4-HMAC-SHA256 Credential=id/\d{8}/us-east-1/s3/aws4_request, SignedHeaders=content-length;content-type;host;x-amz-content-sha256;x-amz-date, Signature=[a-f0-9]{64}").unwrap(); - let authorization_header_str = request.subgraph_request.headers().get("authorization").unwrap().to_str().unwrap(); + let http_request = get_signed_request(request, "products".to_string()); + let authorization_regex = Regex::new(r"AWS4-HMAC-SHA256 Credential=id/\d{8}/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=[a-f0-9]{64}").unwrap(); + let authorization_header_str = http_request.headers().get("authorization").unwrap().to_str().unwrap(); assert_eq!(match authorization_regex.find(authorization_header_str) { Some(m) => m.as_str(), None => "no match" }, authorization_header_str); let x_amz_date_regex = Regex::new(r"\d{8}T\d{6}Z").unwrap(); - let x_amz_date_header_str = request.subgraph_request.headers().get("x-amz-date").unwrap().to_str().unwrap(); + let x_amz_date_header_str = http_request.headers().get("x-amz-date").unwrap().to_str().unwrap(); assert_eq!(match x_amz_date_regex.find(x_amz_date_header_str) { Some(m) => m.as_str(), None => "no match" }, x_amz_date_header_str); - assert_eq!(request.subgraph_request.headers().get("x-amz-content-sha256").unwrap(), "255959b4c6e11c1080f61ce0d75eb1b565c1772173335a7828ba9c13c25c0d8c"); + assert_eq!(http_request.headers().get("x-amz-content-sha256").unwrap(), "255959b4c6e11c1080f61ce0d75eb1b565c1772173335a7828ba9c13c25c0d8c"); true }) @@ -579,6 +629,7 @@ mod test { .header(HOST, "rhost") .header(CONTENT_LENGTH, "22") .header(CONTENT_TYPE, "graphql") + .uri("https://test-endpoint.com") .body(Request::builder().query("query").build()) .expect("expecting valid request"), ) @@ -586,4 +637,32 @@ mod test { .context(Context::new()) .build() } + + fn get_signed_request( + request: &SubgraphRequest, + service_name: String, + ) -> hyper::Request { + let signing_params = { + let ctx = request.context.private_entries.lock(); + let sp = ctx.get::(); + sp.cloned().unwrap() + }; + + let http_request = request + .clone() + .subgraph_request + .map(|body| hyper::Body::from(serde_json::to_string(&body).unwrap())); + + std::thread::spawn(move || { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + signing_params + 
.sign(http_request, service_name.as_str()) + .await + .unwrap() + }) + }) + .join() + .unwrap() + } } diff --git a/apollo-router/src/services/subgraph_service.rs b/apollo-router/src/services/subgraph_service.rs index c99e482786..8ba73b4068 100644 --- a/apollo-router/src/services/subgraph_service.rs +++ b/apollo-router/src/services/subgraph_service.rs @@ -60,6 +60,7 @@ use super::Plugins; use crate::error::FetchError; use crate::graphql; use crate::json_ext::Object; +use crate::plugins::authentication::subgraph::SigningParamsConfig; use crate::plugins::subscription::create_verifier; use crate::plugins::subscription::CallbackMode; use crate::plugins::subscription::SubscriptionConfig; @@ -488,11 +489,28 @@ async fn call_websocket( }; let request = get_websocket_request(service_name.clone(), parts, subgraph_cfg)?; + let display_headers = context.contains_key(LOGGING_DISPLAY_HEADERS); let display_body = context.contains_key(LOGGING_DISPLAY_BODY); + + let signing_params = context + .private_entries + .lock() + .get::() + .cloned(); + + let request = if let Some(signing_params) = signing_params { + signing_params + .sign_empty(request, service_name.as_str()) + .await? + } else { + request + }; + if display_headers { tracing::info!(http.request.headers = ?request.headers(), apollo.subgraph.name = %service_name, "Websocket request headers to subgraph {service_name:?}"); } + if display_body { tracing::info!(http.request.body = ?request.body(), apollo.subgraph.name = %service_name, "Websocket request body to subgraph {service_name:?}"); } @@ -530,14 +548,24 @@ async fn call_websocket( } _ => connect_async(request).instrument(subgraph_req_span).await, } - .map_err(|err| FetchError::SubrequestWsError { - service: service_name.clone(), - reason: format!("cannot connect websocket to subgraph: {err}"), + .map_err(|err| { + if display_body || display_headers { + tracing::info!( + http.response.error = format!("{:?}", &err), apollo.subgraph.name = %service_name, "Websocket connection error from subgraph {service_name:?} received" + ); + } + FetchError::SubrequestWsError { + service: service_name.clone(), + reason: format!("cannot connect websocket to subgraph: {err}"), + } })?; + if display_headers { + tracing::info!(response.headers = ?resp.headers(), apollo.subgraph.name = %service_name, "Websocket response headers to subgraph {service_name:?}"); + } if display_body { tracing::info!( - response.body = %String::from_utf8_lossy(&resp.body_mut().take().unwrap_or_default()), apollo.subgraph.name = %service_name, "Raw response body from subgraph {service_name:?} received" + response.body = %String::from_utf8_lossy(&resp.body_mut().take().unwrap_or_default()), apollo.subgraph.name = %service_name, "Websocket response body from subgraph {service_name:?} received" ); } @@ -674,6 +702,18 @@ async fn call_http( let display_headers = context.contains_key(LOGGING_DISPLAY_HEADERS); let display_body = context.contains_key(LOGGING_DISPLAY_BODY); + let signing_params = context + .private_entries + .lock() + .get::() + .cloned(); + + let request = if let Some(signing_params) = signing_params { + signing_params.sign(request, service_name).await? 
+ } else { + request + }; + // Print out the debug for the request if display_headers { tracing::info!(http.request.headers = ?request.headers(), apollo.subgraph.name = %service_name, "Request headers to subgraph {service_name:?}"); @@ -694,6 +734,18 @@ async fn call_http( .instrument(subgraph_req_span) .await?; + // Print out the debug for the response + if display_headers { + tracing::info!(response.headers = ?parts.headers, apollo.subgraph.name = %service_name, "Response headers from subgraph {service_name:?}"); + } + if display_body { + if let Some(Ok(b)) = &body { + tracing::info!( + response.body = %String::from_utf8_lossy(b), apollo.subgraph.name = %service_name, "Raw response body from subgraph {service_name:?} received" + ); + } + } + let mut graphql_response = match (content_type, body, parts.status.is_success()) { (Ok(ContentType::ApplicationGraphqlResponseJson), Some(Ok(body)), _) | (Ok(ContentType::ApplicationJson), Some(Ok(body)), true) => { diff --git a/docs/source/configuration/authn-subgraph.mdx b/docs/source/configuration/authn-subgraph.mdx index 63a1f98c85..771af8ac1e 100644 --- a/docs/source/configuration/authn-subgraph.mdx +++ b/docs/source/configuration/authn-subgraph.mdx @@ -7,6 +7,12 @@ The Apollo Router supports subgraph request authentication and key rotation via This allows you to secure communication to AWS subgraphs by making sure a subgraph request was made by the Apollo Router, and the payload hasn't been tampered with. +We have tested the feature against the following services: + - AWS Lambda URL + - AWS Appsync + - AWS Amazon API Gateway + - VPC Lattice ⚠️ VPC Lattice doesn't support websockets, you won't be able to use Subscriptions in passthrough mode. + **To use this feature:** To use this feature, your AWS hosted subgraphs must be configured with IAM to accept [signed requests](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html). From 619e2840b29da2e32e5b9076f1031bd3f13d5846 Mon Sep 17 00:00:00 2001 From: Jeremy Lempereur Date: Mon, 11 Sep 2023 14:13:39 +0200 Subject: [PATCH 77/81] Fix: deal with interface inheritance when retrieving selectionset (#3793) Followup to #3718, this changeset makes sure we're able to generate the most concrete selection set for a given operation. This means finding the most concrete type we can when we're dealing with interfaces: - If InterfaceA implements InterfaceB, use InterfaceA as current_type to generate an inline fragment's selection set Given the following invariants: ```graphql interface OperationItemStuff implements OperationItem ``` For ```graphql fragment OperationItemFragment on OperationItem { ... on OperationItemStuff { stuff } } ``` The most concrete interface to generate fields for `OperationItemStuff` is not `OperationItem`, so we narrow down the selection to `OperationItemStuff`. The fixes for #3718 still apply, IE: Given the following invariants: ```graphql type Dog implements Animal ``` For ```graphql ...on Animal { id ...on Dog { name } } ``` The most concrete type to generate a selection set for `Dog` is not `Animal`, so we narrow down the selection to `Dog`. 
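To make the narrowing rule concrete, here is a minimal, self-contained sketch of the idea (illustrative only; the router's actual logic lives in `Schema::most_precise` and the new `Schema::is_implementation` in the diff below). Given two type names and a caller-supplied "is more specific than" predicate, it keeps the narrower type and returns `None` when the two types are unrelated. The function name and the toy predicate in `main` are assumptions made up for this example, not router APIs.

```rust
/// Pick the more specific of two related type names.
/// `is_more_specific(a, b)` should return true when `a` implements
/// or is a subtype of `b` (i.e. `a` is the narrower type).
fn most_precise_of<'a>(
    a: &'a str,
    b: &'a str,
    is_more_specific: impl Fn(&str, &str) -> bool,
) -> Option<&'a str> {
    if a == b {
        Some(a)
    } else if is_more_specific(a, b) {
        Some(a)
    } else if is_more_specific(b, a) {
        Some(b)
    } else {
        // No subtype/implementation relationship: neither type can be used
        // to narrow the other.
        None
    }
}

fn main() {
    // Toy hierarchy mirroring the examples above:
    // Dog implements Animal, OperationItemStuff implements OperationItem.
    let implements = |narrow: &str, wide: &str| {
        matches!(
            (narrow, wide),
            ("Dog", "Animal") | ("OperationItemStuff", "OperationItem")
        )
    };

    assert_eq!(most_precise_of("Animal", "Dog", &implements), Some("Dog"));
    assert_eq!(
        most_precise_of("OperationItem", "OperationItemStuff", &implements),
        Some("OperationItemStuff")
    );
    // Unrelated types: the caller has to decide how to handle this.
    assert_eq!(most_precise_of("Dog", "Query", &implements), None);
}
```

In the router itself the predicate is answered by the schema (`is_subtype`, plus the `is_implementation` check added in `apollo-router/src/spec/schema.rs` below), and the result is used when generating an inline fragment's selection set in `apollo-router/src/spec/selection.rs`.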
--- .../src/services/supergraph_service.rs | 203 ++++++++++++++++++ apollo-router/src/spec/schema.rs | 38 ++++ apollo-router/src/spec/selection.rs | 9 +- 3 files changed, 248 insertions(+), 2 deletions(-) diff --git a/apollo-router/src/services/supergraph_service.rs b/apollo-router/src/services/supergraph_service.rs index 9f14a693a0..198b29b8be 100644 --- a/apollo-router/src/services/supergraph_service.rs +++ b/apollo-router/src/services/supergraph_service.rs @@ -3103,4 +3103,207 @@ mod tests { ); insta::assert_json_snapshot!(with_reversed_fragments); } + + #[tokio::test] + async fn multiple_interface_types() { + let schema = r#" + schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.3", for: EXECUTION) { + query: Query + } + + directive @join__enumValue(graph: join__Graph!) repeatable on ENUM_VALUE + + directive @join__field( + graph: join__Graph + requires: join__FieldSet + provides: join__FieldSet + type: String + external: Boolean + override: String + usedOverridden: Boolean + ) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + + directive @join__graph(name: String!, url: String!) on ENUM_VALUE + + directive @join__implements( + graph: join__Graph! + interface: String! + ) repeatable on OBJECT | INTERFACE + + directive @join__type( + graph: join__Graph! + key: join__FieldSet + extension: Boolean! = false + resolvable: Boolean! = true + isInterfaceObject: Boolean! = false + ) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + + directive @join__unionMember( + graph: join__Graph! + member: String! + ) repeatable on UNION + + directive @link( + url: String + as: String + for: link__Purpose + import: [link__Import] + ) repeatable on SCHEMA + + directive @tag( + name: String! + ) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION | SCHEMA + + enum link__Purpose { + EXECUTION + SECURITY + } + + scalar join__FieldSet + scalar link__Import + + enum join__Graph { + GRAPH1 @join__graph(name: "graph1", url: "http://localhost:8080/graph1") + } + + type Query @join__type(graph: GRAPH1) { + root(id: ID!): Root @join__field(graph: GRAPH1) + } + + type Root @join__type(graph: GRAPH1, key: "id") { + id: ID! + operation(a: Int, b: Int): OperationResult! + } + + union OperationResult + @join__type(graph: GRAPH1) + @join__unionMember(graph: GRAPH1, member: "Operation") = + Operation + + type Operation @join__type(graph: GRAPH1) { + id: ID! + item: [OperationItem!]! + } + + interface OperationItem @join__type(graph: GRAPH1) { + type: OperationType! + } + + enum OperationType @join__type(graph: GRAPH1) { + ADD_ARGUMENT @join__enumValue(graph: GRAPH1) + } + + interface OperationItemRootType implements OperationItem + @join__implements(graph: GRAPH1, interface: "OperationItem") + @join__type(graph: GRAPH1) { + rootType: String! + type: OperationType! + } + + interface OperationItemStuff implements OperationItem + @join__implements(graph: GRAPH1, interface: "OperationItem") + @join__type(graph: GRAPH1) { + stuff: String! + type: OperationType! + } + + type OperationAddArgument implements OperationItem & OperationItemStuff & OperationItemValue + @join__implements(graph: GRAPH1, interface: "OperationItem") + @join__implements(graph: GRAPH1, interface: "OperationItemStuff") + @join__implements(graph: GRAPH1, interface: "OperationItemValue") + @join__type(graph: GRAPH1) { + stuff: String! + type: OperationType! + value: String! 
+ } + + interface OperationItemValue implements OperationItem + @join__implements(graph: GRAPH1, interface: "OperationItem") + @join__type(graph: GRAPH1) { + type: OperationType! + value: String! + } + + type OperationRemoveSchemaRootOperation implements OperationItem & OperationItemRootType + @join__implements(graph: GRAPH1, interface: "OperationItem") + @join__implements(graph: GRAPH1, interface: "OperationItemRootType") + @join__type(graph: GRAPH1) { + rootType: String! + type: OperationType! + } + "#; + + let query = r#"fragment OperationItemFragment on OperationItem { + __typename + ... on OperationItemStuff { + __typename + stuff + } + ... on OperationItemRootType { + __typename + rootType + } + } + query MyQuery($id: ID!, $a: Int, $b: Int) { + root(id: $id) { + __typename + operation(a: $a, b: $b) { + __typename + ... on Operation { + __typename + item { + __typename + ...OperationItemFragment + ... on OperationItemStuff { + __typename + stuff + } + ... on OperationItemValue { + __typename + value + } + } + id + } + } + id + } + }"#; + + let subgraphs = MockedSubgraphs([ + // The response isn't interesting to us, + // we just need to make sure the query makes it through parsing and validation + ("graph1", MockSubgraph::builder().with_json( + serde_json::json!{{"query":"query MyQuery__graph1__0($id:ID!$a:Int$b:Int){root(id:$id){__typename operation(a:$a b:$b){__typename ...on Operation{__typename item{__typename ...on OperationItemStuff{__typename stuff}...on OperationItemRootType{__typename rootType}...on OperationItemValue{__typename value}}id}}id}}", "operationName": "MyQuery__graph1__0", "variables":{"id":"1234","a":1,"b":2}}}, + serde_json::json!{{"data": null }} + ).build()), + ].into_iter().collect()); + + let service = TestHarness::builder() + .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } })) + .unwrap() + .schema(schema) + .extra_plugin(subgraphs) + .build_supergraph() + .await + .unwrap(); + + let request = supergraph::Request::fake_builder() + .context(defer_context()) + .query(query) + .variables( + serde_json_bytes::json! 
{{ "id": "1234", "a": 1, "b": 2}} + .as_object() + .unwrap() + .clone(), + ) + .build() + .unwrap(); + + let mut stream = service.clone().oneshot(request).await.unwrap(); + let response = stream.next_response().await.unwrap(); + assert_eq!(serde_json_bytes::Value::Null, response.data.unwrap()); + } } diff --git a/apollo-router/src/spec/schema.rs b/apollo-router/src/spec/schema.rs index 32e955b786..744e3d2cee 100644 --- a/apollo-router/src/spec/schema.rs +++ b/apollo-router/src/spec/schema.rs @@ -13,6 +13,7 @@ use http::Uri; use sha2::Digest; use sha2::Sha256; +use super::FieldType; use crate::configuration::GraphQLValidationMode; use crate::error::ParseErrors; use crate::error::SchemaError; @@ -159,6 +160,19 @@ impl Schema { .unwrap_or(false) } + pub(crate) fn is_implementation(&self, interface: &str, implementor: &str) -> bool { + self.type_system + .definitions + .interfaces + .get(interface) + .map(|interface| { + interface + .implements_interfaces() + .any(|i| i.interface() == implementor) + }) + .unwrap_or(false) + } + pub(crate) fn is_interface(&self, abstract_type: &str) -> bool { self.type_system .definitions @@ -166,6 +180,30 @@ impl Schema { .contains_key(abstract_type) } + // given two field, returns the one that implements the other, if applicable + pub(crate) fn most_precise<'f>( + &self, + a: &'f FieldType, + b: &'f FieldType, + ) -> Option<&'f FieldType> { + let typename_a = a.inner_type_name().unwrap_or_default(); + let typename_b = b.inner_type_name().unwrap_or_default(); + if typename_a == typename_b { + return Some(a); + } + if self.is_subtype(typename_a, typename_b) || self.is_implementation(typename_a, typename_b) + { + Some(b) + } else if self.is_subtype(typename_b, typename_a) + || self.is_implementation(typename_b, typename_a) + { + Some(a) + } else { + // No relationship between a and b + None + } + } + /// Return an iterator over subgraphs that yields the subgraph name and its URL. 
pub(crate) fn subgraphs(&self) -> impl Iterator { self.subgraphs.iter() diff --git a/apollo-router/src/spec/selection.rs b/apollo-router/src/spec/selection.rs index 5252a7f5d9..e1d6ee7386 100644 --- a/apollo-router/src/spec/selection.rs +++ b/apollo-router/src/spec/selection.rs @@ -164,12 +164,17 @@ impl Selection { schema.is_subtype( type_condition.as_str(), current_type.inner_type_name().unwrap_or("") - ) || + ) || schema.is_implementation( + type_condition.as_str(), + current_type.inner_type_name().unwrap_or("")) + || // if the current type and the type condition are both the same interface, it is still valid type_condition.as_str() == current_type.inner_type_name().unwrap_or("") ); - current_type + let relevant_type = schema.most_precise(current_type, &fragment_type); + debug_assert!(relevant_type.is_some()); + relevant_type.unwrap_or(&fragment_type) } else { &fragment_type }; From fba967010fd21a1594f148d5b1bd57d03fa9f8e5 Mon Sep 17 00:00:00 2001 From: Parker Holladay Date: Mon, 11 Sep 2023 06:15:20 -0600 Subject: [PATCH 78/81] docs: fix auth rhai example and link (#3795) - In Rhai, it is `request[.subgraph].body.extensions`, the example was missing the `.body` - Fix link to rhai example for forwarding headers --- docs/source/configuration/authn-jwt.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/source/configuration/authn-jwt.mdx b/docs/source/configuration/authn-jwt.mdx index 149be31017..0edff5acde 100644 --- a/docs/source/configuration/authn-jwt.mdx +++ b/docs/source/configuration/authn-jwt.mdx @@ -46,8 +46,8 @@ You enable JWT authentication for your router with the following steps: router: jwt: jwks: # This key is required. - - url: https://dev-zzp5enui.us.auth0.com/.well-known/jwks.json - issuer: + - url: https://dev-zzp5enui.us.auth0.com/.well-known/jwks.json + issuer: # These keys are optional. Default values are shown. header_name: Authorization @@ -204,7 +204,7 @@ fn process_request(request) { status: 401 }; } - request.subgraph.extensions["claims"] = claims; + request.subgraph.body.extensions["claims"] = claims; } ``` @@ -650,7 +650,7 @@ This matching strategy is necessary because some identity providers (IdPs) don't ## Forwarding JWTs to subgraphs -Because the Apollo Router handles validating incoming JWTs, you rarely need to pass those JWTs to individual subgraphs in their entirety. Instead, you usually want to [pass JWT _claims_ to subgraphs](#example-forwarding-claims-to-subgraphs) to enable fine-grained access control. +Because the Apollo Router handles validating incoming JWTs, you rarely need to pass those JWTs to individual subgraphs in their entirety. Instead, you usually want to [pass JWT _claims_ to subgraphs](#example-forwarding-claims-to-subgraphs-as-headers) to enable fine-grained access control. If you _do_ need to pass entire JWTs to subgraphs, you can do so via the Apollo Router's general-purpose [HTTP header propagation settings](./header-propagation). 
From 904b3096ed54d6349b69b15de0997278954cdb21 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Mon, 11 Sep 2023 14:46:18 +0200 Subject: [PATCH 79/81] Update tokio-tungstenite (#3643) Co-authored-by: Coenen Benjamin --- Cargo.lock | 47 +++---------------- apollo-router/Cargo.toml | 4 +- .../src/services/subgraph_service.rs | 2 +- 3 files changed, 9 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2ea7eb152b..6d20ce4600 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -380,7 +380,7 @@ dependencies = [ "tokio", "tokio-rustls 0.24.1", "tokio-stream", - "tokio-tungstenite 0.18.0", + "tokio-tungstenite", "tokio-util", "toml 0.7.6", "tonic 0.8.3", @@ -972,7 +972,7 @@ dependencies = [ "sha1 0.10.5", "sync_wrapper", "tokio", - "tokio-tungstenite 0.20.0", + "tokio-tungstenite", "tower", "tower-layer", "tower-service", @@ -6374,22 +6374,6 @@ dependencies = [ "tokio-stream", ] -[[package]] -name = "tokio-tungstenite" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54319c93411147bced34cb5609a80e0a8e44c5999c93903a81cd866630ec0bfd" -dependencies = [ - "futures-util", - "log", - "rustls 0.20.8", - "rustls-native-certs", - "tokio", - "tokio-rustls 0.23.4", - "tungstenite 0.18.0", - "webpki", -] - [[package]] name = "tokio-tungstenite" version = "0.20.0" @@ -6398,8 +6382,11 @@ checksum = "2b2dbec703c26b00d74844519606ef15d09a7d6857860f84ad223dec002ddea2" dependencies = [ "futures-util", "log", + "rustls 0.21.6", + "rustls-native-certs", "tokio", - "tungstenite 0.20.0", + "tokio-rustls 0.24.1", + "tungstenite", ] [[package]] @@ -6778,27 +6765,6 @@ dependencies = [ "syn 2.0.29", ] -[[package]] -name = "tungstenite" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ee6ab729cd4cf0fd55218530c4522ed30b7b6081752839b68fcec8d0960788" -dependencies = [ - "base64 0.13.1", - "byteorder", - "bytes", - "http", - "httparse", - "log", - "rand 0.8.5", - "rustls 0.20.8", - "sha1 0.10.5", - "thiserror", - "url", - "utf-8", - "webpki", -] - [[package]] name = "tungstenite" version = "0.20.0" @@ -6812,6 +6778,7 @@ dependencies = [ "httparse", "log", "rand 0.8.5", + "rustls 0.21.6", "sha1 0.10.5", "thiserror", "url", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 6e9eab0e03..8af25ed3df 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -226,9 +226,7 @@ uuid = { version = "1.4.1", features = ["serde", "v4"] } yaml-rust = "0.4.5" wiremock = "0.5.19" wsl = "0.1.0" -tokio-tungstenite = { version = "0.18.0", features = [ - "rustls-tls-native-roots", -] } +tokio-tungstenite = { version = "0.20.0", features = ["rustls-tls-native-roots"] } tokio-rustls = "0.24.1" http-serde = "1.1.3" hmac = "0.12.1" diff --git a/apollo-router/src/services/subgraph_service.rs b/apollo-router/src/services/subgraph_service.rs index 8ba73b4068..2d0044d488 100644 --- a/apollo-router/src/services/subgraph_service.rs +++ b/apollo-router/src/services/subgraph_service.rs @@ -542,7 +542,7 @@ async fn call_websocket( let (ws_stream, mut resp) = match request.uri().scheme_str() { Some("wss") => { - connect_async_tls_with_config(request, None, None) + connect_async_tls_with_config(request, None, false, None) .instrument(subgraph_req_span) .await } From 970d39d4e85fe2e13146ca4a8808bfeba0767358 Mon Sep 17 00:00:00 2001 From: Jesse Rosenberger Date: Tue, 12 Sep 2023 11:27:06 +0300 Subject: [PATCH 80/81] Update Changelog for #3586 (#3804) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit This corrects the CHANGELOG entry for #3586 which inadvertently suggested using `` twice instead of `` and (separately) ``: ``` https://studio.apollographql.com/graph//variant//operations?query= ``` This doesn't replace the need to document this in https://github.com/apollographql/router/issues/3803. 😄 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 26f26fb183..e3050a84b1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -146,7 +146,7 @@ By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/p This exposes a new key in the `Context`, `apollo_operation_id`, which identifies operation you can find in studio: ``` -https://studio.apollographql.com/graph//variant//operations?query= +https://studio.apollographql.com/graph//variant//operations?query= ``` The `apollo_operation_id` context key is exposed during: From a791fdde9c3aa81e64209a39f3ad0d7ac3a9f6bf Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Tue, 12 Sep 2023 11:35:26 +0100 Subject: [PATCH 81/81] update the documentation for extraLabels The documentation for the helm chart will be generated at release time and will include the updated comment. --- helm/chart/router/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/helm/chart/router/values.yaml b/helm/chart/router/values.yaml index f682a3f933..94120f7f00 100644 --- a/helm/chart/router/values.yaml +++ b/helm/chart/router/values.yaml @@ -94,7 +94,7 @@ extraContainers: [] # command: ["sh"] initContainers: [] -# -- A map of extra labels to apply to the router deploment and containers +# -- A map of extra labels to apply to the resources created by this chart # Example: # extraLabels: # label_one_name: "label_one_value"