From 24c0c46768dd72c022eae02769121993a7ef131f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 15 Nov 2022 19:11:28 +0000 Subject: [PATCH 01/45] chore(deps): update all non-major packages >= 1.0 --- Cargo.lock | 8 ++++---- apollo-router/Cargo.toml | 4 ++-- .../tracing/datadog-subgraph/package-lock.json | 14 +++++++------- dockerfiles/tracing/datadog-subgraph/package.json | 2 +- .../tracing/jaeger-subgraph/package-lock.json | 14 +++++++------- dockerfiles/tracing/jaeger-subgraph/package.json | 2 +- .../tracing/zipkin-subgraph/package-lock.json | 14 +++++++------- dockerfiles/tracing/zipkin-subgraph/package.json | 2 +- 8 files changed, 30 insertions(+), 30 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a7cdcd911b..450422f93b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2267,9 +2267,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.21.0" +version = "1.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581d4e3314cae4536e5d22ffd23189d4a374696c5ef733eadafae0ed273fd303" +checksum = "ba1e75aa1530e7385af7b2685478dece08dafb9db3b4225c753286decea83bef" dependencies = [ "console 0.15.2", "lazy_static", @@ -5551,9 +5551,9 @@ checksum = "e8db7427f936968176eaa7cdf81b7f98b980b18495ec28f1b5791ac3bfe3eea9" [[package]] name = "uuid" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" +checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" dependencies = [ "getrandom", "serde", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index c4e22957ee..050669a213 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -196,7 +196,7 @@ tracing-subscriber = { version = "0.3.11", features = ["env-filter", "json"] } url = { version = "2.3.1", features = ["serde"] } urlencoding = "2.1.2" -uuid = { version = "1.2.1", features = ["serde", "v4"] } +uuid = { version = "1.2.2", features = ["serde", "v4"] } yaml-rust = "0.4.5" askama = "0.11.1" @@ -207,7 +207,7 @@ uname = "0.1.1" uname = "0.1.1" [dev-dependencies] -insta = { version = "1.21.0", features = ["json", "redactions"] } +insta = { version = "1.21.1", features = ["json", "redactions"] } introspector-gadget = "0.1.0" jsonpath_lib = "0.3.0" maplit = "1.0.2" diff --git a/dockerfiles/tracing/datadog-subgraph/package-lock.json b/dockerfiles/tracing/datadog-subgraph/package-lock.json index 53d8f132db..5f0625b622 100644 --- a/dockerfiles/tracing/datadog-subgraph/package-lock.json +++ b/dockerfiles/tracing/datadog-subgraph/package-lock.json @@ -17,7 +17,7 @@ "graphql": "^16.5.0" }, "devDependencies": { - "typescript": "4.8.4" + "typescript": "4.9.3" } }, "node_modules/@apollo/cache-control-types": { @@ -1718,9 +1718,9 @@ } }, "node_modules/typescript": { - "version": "4.8.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.8.4.tgz", - "integrity": "sha512-QCh+85mCy+h0IGff8r5XWzOVSbBO+KfeYrMQh7NJ58QujwcE22u+NUSmUxqF+un70P9GXKxa2HCNiTTMJknyjQ==", + "version": "4.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.3.tgz", + "integrity": "sha512-CIfGzTelbKNEnLpLdGFgdyKhG23CKdKgQPOBc+OUNrkJ2vr+KSzsSV5kq5iWhEQbok+quxgGzrAtGWCyU7tHnA==", "dev": true, "bin": { "tsc": "bin/tsc", @@ -3119,9 +3119,9 @@ } }, "typescript": { - "version": "4.8.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.8.4.tgz", - "integrity": 
"sha512-QCh+85mCy+h0IGff8r5XWzOVSbBO+KfeYrMQh7NJ58QujwcE22u+NUSmUxqF+un70P9GXKxa2HCNiTTMJknyjQ==", + "version": "4.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.3.tgz", + "integrity": "sha512-CIfGzTelbKNEnLpLdGFgdyKhG23CKdKgQPOBc+OUNrkJ2vr+KSzsSV5kq5iWhEQbok+quxgGzrAtGWCyU7tHnA==", "dev": true }, "unpipe": { diff --git a/dockerfiles/tracing/datadog-subgraph/package.json b/dockerfiles/tracing/datadog-subgraph/package.json index 6019b4b26e..133c1ea381 100644 --- a/dockerfiles/tracing/datadog-subgraph/package.json +++ b/dockerfiles/tracing/datadog-subgraph/package.json @@ -18,6 +18,6 @@ "graphql": "^16.5.0" }, "devDependencies": { - "typescript": "4.8.4" + "typescript": "4.9.3" } } diff --git a/dockerfiles/tracing/jaeger-subgraph/package-lock.json b/dockerfiles/tracing/jaeger-subgraph/package-lock.json index 42416bfd1b..343b0c96f8 100644 --- a/dockerfiles/tracing/jaeger-subgraph/package-lock.json +++ b/dockerfiles/tracing/jaeger-subgraph/package-lock.json @@ -18,7 +18,7 @@ "opentracing": "^0.14.7" }, "devDependencies": { - "typescript": "4.8.4" + "typescript": "4.9.3" } }, "node_modules/@apollo/cache-control-types": { @@ -1354,9 +1354,9 @@ } }, "node_modules/typescript": { - "version": "4.8.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.8.4.tgz", - "integrity": "sha512-QCh+85mCy+h0IGff8r5XWzOVSbBO+KfeYrMQh7NJ58QujwcE22u+NUSmUxqF+un70P9GXKxa2HCNiTTMJknyjQ==", + "version": "4.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.3.tgz", + "integrity": "sha512-CIfGzTelbKNEnLpLdGFgdyKhG23CKdKgQPOBc+OUNrkJ2vr+KSzsSV5kq5iWhEQbok+quxgGzrAtGWCyU7tHnA==", "dev": true, "bin": { "tsc": "bin/tsc", @@ -2477,9 +2477,9 @@ } }, "typescript": { - "version": "4.8.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.8.4.tgz", - "integrity": "sha512-QCh+85mCy+h0IGff8r5XWzOVSbBO+KfeYrMQh7NJ58QujwcE22u+NUSmUxqF+un70P9GXKxa2HCNiTTMJknyjQ==", + "version": "4.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.3.tgz", + "integrity": "sha512-CIfGzTelbKNEnLpLdGFgdyKhG23CKdKgQPOBc+OUNrkJ2vr+KSzsSV5kq5iWhEQbok+quxgGzrAtGWCyU7tHnA==", "dev": true }, "unpipe": { diff --git a/dockerfiles/tracing/jaeger-subgraph/package.json b/dockerfiles/tracing/jaeger-subgraph/package.json index 73d9e32d82..92ed871e06 100644 --- a/dockerfiles/tracing/jaeger-subgraph/package.json +++ b/dockerfiles/tracing/jaeger-subgraph/package.json @@ -19,6 +19,6 @@ "opentracing": "^0.14.7" }, "devDependencies": { - "typescript": "4.8.4" + "typescript": "4.9.3" } } diff --git a/dockerfiles/tracing/zipkin-subgraph/package-lock.json b/dockerfiles/tracing/zipkin-subgraph/package-lock.json index 64a66c18a0..c8c49d6cb5 100644 --- a/dockerfiles/tracing/zipkin-subgraph/package-lock.json +++ b/dockerfiles/tracing/zipkin-subgraph/package-lock.json @@ -19,7 +19,7 @@ "zipkin-javascript-opentracing": "^3.0.0" }, "devDependencies": { - "typescript": "4.8.4" + "typescript": "4.9.3" } }, "node_modules/@apollo/cache-control-types": { @@ -1381,9 +1381,9 @@ } }, "node_modules/typescript": { - "version": "4.8.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.8.4.tgz", - "integrity": "sha512-QCh+85mCy+h0IGff8r5XWzOVSbBO+KfeYrMQh7NJ58QujwcE22u+NUSmUxqF+un70P9GXKxa2HCNiTTMJknyjQ==", + "version": "4.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.3.tgz", + "integrity": "sha512-CIfGzTelbKNEnLpLdGFgdyKhG23CKdKgQPOBc+OUNrkJ2vr+KSzsSV5kq5iWhEQbok+quxgGzrAtGWCyU7tHnA==", "dev": true, "bin": { "tsc": "bin/tsc", @@ 
-2548,9 +2548,9 @@ } }, "typescript": { - "version": "4.8.4", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.8.4.tgz", - "integrity": "sha512-QCh+85mCy+h0IGff8r5XWzOVSbBO+KfeYrMQh7NJ58QujwcE22u+NUSmUxqF+un70P9GXKxa2HCNiTTMJknyjQ==", + "version": "4.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.3.tgz", + "integrity": "sha512-CIfGzTelbKNEnLpLdGFgdyKhG23CKdKgQPOBc+OUNrkJ2vr+KSzsSV5kq5iWhEQbok+quxgGzrAtGWCyU7tHnA==", "dev": true }, "unpipe": { diff --git a/dockerfiles/tracing/zipkin-subgraph/package.json b/dockerfiles/tracing/zipkin-subgraph/package.json index e0d27f5259..0dd058f839 100644 --- a/dockerfiles/tracing/zipkin-subgraph/package.json +++ b/dockerfiles/tracing/zipkin-subgraph/package.json @@ -20,6 +20,6 @@ "zipkin-javascript-opentracing": "^3.0.0" }, "devDependencies": { - "typescript": "4.8.4" + "typescript": "4.9.3" } } From 7a931dee51d72405edc4129dd7f615e3e2cbbaa9 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 15 Nov 2022 22:41:15 +0000 Subject: [PATCH 02/45] fix(deps): update rust crate hyper-rustls to 0.23.1 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 450422f93b..121633f063 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2168,9 +2168,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.0" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +checksum = "59df7c4e19c950e6e0e868dcc0a300b09a9b88e9ec55bd879ca819087a77355d" dependencies = [ "http", "hyper", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 050669a213..389bc89d6e 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -81,7 +81,7 @@ http-body = "0.4.5" humantime = "2.1.0" humantime-serde = "1.1.1" hyper = { version = "0.14.23", features = ["server", "client"] } -hyper-rustls = { version = "0.23.0", features = ["http1", "http2"] } +hyper-rustls = { version = "0.23.1", features = ["http1", "http2"] } indexmap = { version = "1.9.1", features = ["serde-1"] } itertools = "0.10.5" jsonschema = { version = "0.16.1", default-features = false } From 5306abeca2102205aeb88dfb71e21817ca4bde9a Mon Sep 17 00:00:00 2001 From: Lenny Burdette Date: Wed, 16 Nov 2022 13:33:29 -0500 Subject: [PATCH 03/45] docs: remove note about external spaceport in advanced configuration (#1939) --- .../source/configuration/apollo-telemetry.mdx | 71 +++++++------------ 1 file changed, 24 insertions(+), 47 deletions(-) diff --git a/docs/source/configuration/apollo-telemetry.mdx b/docs/source/configuration/apollo-telemetry.mdx index 01ada913ae..88dca80e97 100644 --- a/docs/source/configuration/apollo-telemetry.mdx +++ b/docs/source/configuration/apollo-telemetry.mdx @@ -2,7 +2,7 @@ title: Sending Apollo Router usage data to Apollo Studio --- -The Apollo Router can transmit usage data to Apollo Studio via a reporting agent called **Spaceport**. By default, Spaceport runs automatically as a component _within_ the Apollo Router. Additional details on its modes of operation are provided below. +The Apollo Router can transmit usage data to Apollo Studio via a reporting agent called **Spaceport**. ## Enabling usage reporting @@ -24,59 +24,36 @@ telemetry: apollo: # This example will trace half of requests. This number can't # be higher than tracing.trace_config.sampler. 
- field_level_instrumentation_sampler: 0.5 + field_level_instrumentation_sampler: 0.5 tracing: trace_config: # FTV1 uses the same trace sampling as other tracing options, - # so this value must be set as well. - sampler: 0.5 + # so this value must be set as well. + sampler: 0.5 ``` -## Prometheus endpoint customization +Note that `field_level_instrumentation_sampler` may not sample at a greater rate than `trace_config.sampler`. -You can customize the prometheus scrape URL in your YAML configuration file: +## Advanced configuration ```yaml title="router.yaml" telemetry: - metrics: - prometheus: - listen: 0.0.0.0:9090 - path: /metrics - enabled: true -``` - -## Advanced configuration (not recommended) - -Spaceport can run either as an internal component of a single Apollo Router instance, or as an external resource shared by _multiple_ router instances. - -For the majority of users, an internal Spaceport instance is sufficient. - -To connect the Apollo Router to an external Spaceport instance, specify its endpoint URL in your YAML config file: - -```yaml title="router.yaml" -telemetry: - apollo: - # The percentage of requests will include HTTP request and response headers in traces sent to Apollo Studio. - # This is expensive and should be left at a low value. - # This cannot be higher than tracing->trace_config->sampler - field_level_instrumentation_sampler: 0.01 # (default) - - # Include HTTP request and response headers in traces sent to Apollo Studio - send_headers: # other possible values are all, only (with an array), except (with an array), none (by default) - except: # Send all headers except referer - - referer - - # Include variable values in Apollo in traces sent to Apollo Studio - send_variable_values: # other possible values are all, only (with an array), except (with an array), none (by default) - except: # Send all variable values except for variable named first - - first - tracing: - trace_config: - sampler: 0.5 # The percentage of requests that will generate traces (a rate or `always_on` or `always_off`) + apollo: + # The percentage of requests will include HTTP request and response headers in traces sent to Apollo Studio. + # This is expensive and should be left at a low value. + # This cannot be higher than tracing->trace_config->sampler + field_level_instrumentation_sampler: 0.01 # (default) + + # Include HTTP request and response headers in traces sent to Apollo Studio + send_headers: # other possible values are all, only (with an array), except (with an array), none (by default) + except: # Send all headers except referer + - referer + + # Include variable values in Apollo in traces sent to Apollo Studio + send_variable_values: # other possible values are all, only (with an array), except (with an array), none (by default) + except: # Send all variable values except for variable named first + - first + tracing: + trace_config: + sampler: 0.5 # The percentage of requests that will generate traces (a rate or `always_on` or `always_off`) ``` - -Note that `field_level_instrumentation_sampler` may not sample at a greater rate than `trace_config/sampler`.**** - -## Running Spaceport externally (not recommended) - -Running spaceport as a separate process currently requires building from [source](https://github.com/apollographql/router/tree/main/apollo-spaceport). 
From 519a52f629c88d87e63986f185d7473b0f4ffe46 Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Thu, 17 Nov 2022 10:26:49 +0000 Subject: [PATCH 04/45] add a supergraph configmap option to the helm chart (#2119) Here's an example of values.yaml that you could use to mount this to your container: ``` extraEnvVars: - name: APOLLO_ROUTER_SUPERGRAPH_PATH value: /data/supergraph-schema.graphql extraVolumeMounts: - name: supergraph-schema mountPath: /data readOnly: true extraVolumes: - name: supergraph-schema configMap: name: "{{ .Release.Name }}-supergraph" items: - key: supergraph-schema.graphql path: supergraph-schema.graphql ``` Note: This takes advantage of the fact that we `tpl` template the extraVolumes in the deployment template, so {{ .Release.Name }} will be templated into the release name at install. You don't have to do this, you could just hard-code it, but this is neater. Here's an example command line: ``` helm upgrade --install --create-namespace --namespace router-test --set-file supergraphFile=supergraph-schema.graphql router-test oci://ghcr.io/apollographql/helm-charts/router --version 1.0.0-rc.9 --values values.yaml ``` NB: rc.9 doesn't exist, so the command is purely illustrative to show how this works. --- NEXT_CHANGELOG.md | 32 +++++++++++++++++++ .../chart/router/templates/supergraph-cm.yaml | 12 +++++++ helm/chart/router/values.yaml | 4 +++ 3 files changed, 48 insertions(+) create mode 100644 helm/chart/router/templates/supergraph-cm.yaml diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 5e54d34b87..9ac1a0396a 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -26,6 +26,38 @@ By [@USERNAME](https://github.com/USERNAME) in https://github.com/apollographql/ # [x.x.x] (unreleased) - 2022-mm-dd ## ❗ BREAKING ❗ ## 🚀 Features + +### Add a supergraph configmap option to the helm chart ([PR #2119](https://github.com/apollographql/router/pull/2119)) + +Adds the capability to create a configmap containing your supergraph schema. Here's an example of how you could make use of this from your values.yaml and with the `helm` install command. + +```yaml +extraEnvVars: + - name: APOLLO_ROUTER_SUPERGRAPH_PATH + value: /data/supergraph-schema.graphql + +extraVolumeMounts: + - name: supergraph-schema + mountPath: /data + readOnly: true + +extraVolumes: + - name: supergraph-schema + configMap: + name: "{{ .Release.Name }}-supergraph" + items: + - key: supergraph-schema.graphql + path: supergraph-schema.graphql +``` + +With that values.yaml content, and with your supergraph schema in a file name supergraph-schema.graphql, you can execute: + +``` +helm upgrade --install --create-namespace --namespace router-test --set-file supergraphFile=supergraph-schema.graphql router-test oci://ghcr.io/apollographql/helm-charts/router --version 1.0.0-rc.9 --values values.yaml +``` + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2119 + ## 🐛 Fixes ## 🛠 Maintenance ## 📚 Documentation diff --git a/helm/chart/router/templates/supergraph-cm.yaml b/helm/chart/router/templates/supergraph-cm.yaml new file mode 100644 index 0000000000..40ff9d073a --- /dev/null +++ b/helm/chart/router/templates/supergraph-cm.yaml @@ -0,0 +1,12 @@ +{{- if .Values.supergraphFile }} +{{- $routerFullName := include "router.fullname" . -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ $routerFullName }}-supergraph + labels: + {{- include "router.labels" . 
| nindent 4 }} +data: + supergraph-schema.graphql: |- +{{ .Values.supergraphFile | indent 4 }} +{{- end }} diff --git a/helm/chart/router/values.yaml b/helm/chart/router/values.yaml index 2caaf0b8ed..62a5fbfb9d 100644 --- a/helm/chart/router/values.yaml +++ b/helm/chart/router/values.yaml @@ -30,6 +30,10 @@ managedFederation: # -- If using managed federation, the variant of which graph to use graphRef: "" +# This should not be specified in values.yaml. It's much simpler to use --set-file from helm command line. +# e.g.: helm ... --set-file supergraphFile="location of your supergraph file" +supergraphFile: + # An array of extra environmental variables # Example: # extraEnvVars: From 5ed584ba27469a51cf03a4e3b003a7aae414287a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 17 Nov 2022 11:47:45 +0000 Subject: [PATCH 05/45] chore(deps): update rust crate reqwest to 0.11.13 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ac7a886ea7..6e99005130 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3820,9 +3820,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.12" +version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "431949c384f4e2ae07605ccaa56d1d9d2ecdb5cadd4f9577ccfab29f2e5149fc" +checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" dependencies = [ "base64 0.13.1", "bytes", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 0b75a52547..90b3d9c530 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -145,7 +145,7 @@ rhai = { version = "1.11.0", features = ["sync", "serde", "internals"] } redis = { version = "0.21.6", optional = true, features = ["cluster", "tokio-comp"] } redis_cluster_async = { version = "0.7.0", optional = true } regex = "1.7.0" -reqwest = { version = "0.11.12", default-features = false, features = [ +reqwest = { version = "0.11.13", default-features = false, features = [ "rustls-tls", "json", "stream", @@ -214,7 +214,7 @@ maplit = "1.0.2" memchr = { version = "2.5.0", default-features = false } mockall = "0.11.3" once_cell = "1.16.0" -reqwest = { version = "0.11.12", default-features = false, features = [ +reqwest = { version = "0.11.13", default-features = false, features = [ "json", "stream", ] } From 538a06984f797688d9e32ae5e9467d5a7857dc1e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 17 Nov 2022 22:39:23 +0000 Subject: [PATCH 06/45] fix(deps): update rust crate indexmap to 1.9.2 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6e99005130..f91e18a67a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2224,9 +2224,9 @@ checksum = "cb56e1aa765b4b4f3aadfab769793b7087bb03a4ea4920644a6d238e2df5b9ed" [[package]] name = "indexmap" -version = "1.9.1" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg", "hashbrown", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 90b3d9c530..a3a42e34b9 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -82,7 +82,7 @@ humantime = "2.1.0" humantime-serde = "1.1.1" 
hyper = { version = "0.14.23", features = ["server", "client"] } hyper-rustls = { version = "0.23.1", features = ["http1", "http2"] } -indexmap = { version = "1.9.1", features = ["serde-1"] } +indexmap = { version = "1.9.2", features = ["serde-1"] } itertools = "0.10.5" jsonschema = { version = "0.16.1", default-features = false } lazy_static = "1.4.0" From f08aa5fd7cdb2162ae7340ea58e8d03b3f8c1b4f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 18 Nov 2022 10:20:42 +0000 Subject: [PATCH 07/45] fix(deps): update rust crate serde_json to 1.0.88 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f91e18a67a..958c6f8b58 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4366,9 +4366,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.87" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" +checksum = "8e8b3801309262e8184d9687fb697586833e939767aea0dda89f5a8e650e8bd7" dependencies = [ "indexmap", "itoa 1.0.4", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index a3a42e34b9..ae95af0d27 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -156,7 +156,7 @@ shellexpand = "2.1.2" sha2 = "0.10.6" serde = { version = "1.0.147", features = ["derive", "rc"] } serde_json_bytes = { version = "0.2.0", features = ["preserve_order"] } -serde_json = { version = "1.0.87", features = ["preserve_order"] } +serde_json = { version = "1.0.88", features = ["preserve_order"] } serde_urlencoded = "0.7.1" serde_yaml = "0.8.26" static_assertions = "1.1.0" From e4f9dfb9c744152af8c9e5977b5839ee73d6d51b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 19 Nov 2022 01:16:16 +0000 Subject: [PATCH 08/45] fix(deps): update all non-major packages >= 1.0 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- dockerfiles/fed2-demo-gateway/package.json | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 958c6f8b58..8bc749f3fd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4999,9 +4999,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.21.2" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" +checksum = "d76ce4a75fb488c605c54bf610f221cea8b0dafb53333c1a67e8ee199dcd2ae3" dependencies = [ "autocfg", "bytes", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index ae95af0d27..d908c7b185 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -162,7 +162,7 @@ serde_yaml = "0.8.26" static_assertions = "1.1.0" sys-info = "0.9.1" thiserror = "1.0.37" -tokio = { version = "1.21.2", features = ["full"] } +tokio = { version = "1.22.0", features = ["full"] } tokio-stream = { version = "0.1.11", features = ["sync", "net"] } tokio-util = { version = "0.7.4", features = ["net", "codec"] } tonic = { version = "0.6.2", features = ["transport", "tls", "tls-roots"] } diff --git a/dockerfiles/fed2-demo-gateway/package.json b/dockerfiles/fed2-demo-gateway/package.json index d7241ba418..c3f06df776 100644 --- a/dockerfiles/fed2-demo-gateway/package.json +++ b/dockerfiles/fed2-demo-gateway/package.json @@ -8,7 +8,7 @@ }, 
"dependencies": { "@apollo/server": "4.1.1", - "@apollo/gateway": "2.1.4", + "@apollo/gateway": "2.2.0", "supergraph-demo-opentelemetry": "0.2.4", "graphql": "16.6.0" }, From 94937358ad2005d287786f43d237dfdb7bce7b83 Mon Sep 17 00:00:00 2001 From: Col Date: Mon, 21 Nov 2022 17:49:43 +0800 Subject: [PATCH 09/45] Improve errors when subgraph service returns a non-2xx status code (#2118) This PR tries to address Issue #2117 Here's an example of how the Router would now respond when a subgraph service returns a non-2xx status code and content-type not set to `application/json`. ``` { "data": null, "errors": [ { "message": "HTTP fetch failed from 'my-service': 401 Unauthorized", "path": [], "extensions": { "type": "SubrequestHttpError", "service": "my-service", "reason": "HTTP fetch failed from 'my-service': 401 Unauthorized" } } ] } ``` --- NEXT_CHANGELOG.md | 7 ++ .../src/services/subgraph_service.rs | 77 ++++++++++++++++--- 2 files changed, 72 insertions(+), 12 deletions(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 9ac1a0396a..44917c7c73 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -59,5 +59,12 @@ helm upgrade --install --create-namespace --namespace router-test --set-file sup By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2119 ## 🐛 Fixes + +### Improve errors when subgraph returns non-GraphQL response with a non-2xx status code ([Issue #2117](https://github.com/apollographql/router/issues/2117)) + +The error response will now contain the status code and status name. Example: `HTTP fetch failed from 'my-service': 401 Unauthorized` + +By [@col](https://github.com/col) in https://github.com/apollographql/router/pull/2118 + ## 🛠 Maintenance ## 📚 Documentation diff --git a/apollo-router/src/services/subgraph_service.rs b/apollo-router/src/services/subgraph_service.rs index 81fcc32651..1df74aecab 100644 --- a/apollo-router/src/services/subgraph_service.rs +++ b/apollo-router/src/services/subgraph_service.rs @@ -186,10 +186,21 @@ impl tower::Service for SubgraphService { if !content_type_str.contains(APPLICATION_JSON_HEADER_VALUE) && !content_type_str.contains(GRAPHQL_JSON_RESPONSE_HEADER_VALUE) { - return Err(BoxError::from(FetchError::SubrequestHttpError { - service: service_name.clone(), - reason: format!("subgraph didn't return JSON (expected content-type: application/json or content-type: application/graphql+json; found content-type: {content_type:?})"), - })); + return if !parts.status.is_success() { + Err(BoxError::from(FetchError::SubrequestHttpError { + service: service_name.clone(), + reason: format!( + "{}: {}", + parts.status.as_str(), + parts.status.canonical_reason().unwrap_or("Unknown") + ), + })) + } else { + Err(BoxError::from(FetchError::SubrequestHttpError { + service: service_name.clone(), + reason: format!("subgraph didn't return JSON (expected content-type: application/json or content-type: application/graphql+json; found content-type: {content_type:?})"), + })) + }; } } } @@ -376,9 +387,22 @@ mod tests { let make_svc = make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) }); let server = Server::bind(&socket_addr).serve(make_svc); - if let Err(e) = server.await { - eprintln!("server error: {}", e); + server.await.unwrap(); + } + + // starts a local server emulating a subgraph returning status code 401 + async fn emulate_subgraph_unauthorized(socket_addr: SocketAddr) { + async fn handle(_request: http::Request) -> Result, Infallible> { + Ok(http::Response::builder() + 
.header(CONTENT_TYPE, "text/html") + .status(StatusCode::UNAUTHORIZED) + .body(r#""#.into()) + .unwrap()) } + + let make_svc = make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) }); + let server = Server::bind(&socket_addr).serve(make_svc); + server.await.unwrap(); } // starts a local server emulating a subgraph returning bad response format @@ -393,9 +417,7 @@ mod tests { let make_svc = make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) }); let server = Server::bind(&socket_addr).serve(make_svc); - if let Err(e) = server.await { - eprintln!("server error: {}", e); - } + server.await.unwrap(); } // starts a local server emulating a subgraph returning compressed response @@ -442,9 +464,7 @@ mod tests { let make_svc = make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) }); let server = Server::bind(&socket_addr).serve(make_svc); - if let Err(e) = server.await { - eprintln!("server error: {}", e); - } + server.await.unwrap(); } #[tokio::test(flavor = "multi_thread")] @@ -549,4 +569,37 @@ mod tests { assert_eq!(resp.response.body(), &resp_from_subgraph); } + + #[tokio::test(flavor = "multi_thread")] + async fn test_unauthorized() { + let socket_addr = SocketAddr::from_str("127.0.0.1:2828").unwrap(); + tokio::task::spawn(emulate_subgraph_unauthorized(socket_addr)); + let subgraph_service = SubgraphService::new("test"); + + let url = Uri::from_str(&format!("http://{}", socket_addr)).unwrap(); + let err = subgraph_service + .oneshot(SubgraphRequest { + supergraph_request: Arc::new( + http::Request::builder() + .header(HOST, "host") + .header(CONTENT_TYPE, APPLICATION_JSON_HEADER_VALUE) + .body(Request::builder().query("query").build()) + .expect("expecting valid request"), + ), + subgraph_request: http::Request::builder() + .header(HOST, "rhost") + .header(CONTENT_TYPE, APPLICATION_JSON_HEADER_VALUE) + .uri(url) + .body(Request::builder().query("query").build()) + .expect("expecting valid request"), + operation_kind: OperationKind::Query, + context: Context::new(), + }) + .await + .unwrap_err(); + assert_eq!( + err.to_string(), + "HTTP fetch failed from 'test': 401: Unauthorized" + ); + } } From 8a7888e68729b5442a5ddd23d444cd7962cf5d5f Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Mon, 21 Nov 2022 15:28:55 +0000 Subject: [PATCH 10/45] update documentation to reflect new examples structure (#2133) fixes: #2095 Most of the example references are still valid, so I've only updated the ones that raised 404s. I've also fixed a factual error about field accessibility. --- NEXT_CHANGELOG.md | 7 +++++++ docs/source/customizations/rhai-api.mdx | 8 ++++---- docs/source/customizations/rhai.mdx | 2 +- 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 44917c7c73..f8b89474ea 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -68,3 +68,10 @@ By [@col](https://github.com/col) in https://github.com/apollographql/router/pul ## 🛠 Maintenance ## 📚 Documentation + +### update documentation to reflect new examples structure ([Issue #2095](https://github.com/apollographql/router/pull/2133)) + +We recently updated the examples directory structure. This fixes the documentation links to the examples. It also makes clear that rhai subgraph fields are read-only, since they are shared resources. 
+ +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2133 + diff --git a/docs/source/customizations/rhai-api.mdx b/docs/source/customizations/rhai-api.mdx index 2296313943..201a616262 100644 --- a/docs/source/customizations/rhai-api.mdx +++ b/docs/source/customizations/rhai-api.mdx @@ -167,7 +167,7 @@ fn supergraph_service(service) { ## `Request` interface -All callback functions registered via `map_request` are passed a `request` object that represents the request sent by the client. This object provides the following fields, any of which a callback can modify in-place: +All callback functions registered via `map_request` are passed a `request` object that represents the request sent by the client. This object provides the following fields, any of which a callback can modify in-place (read-write): ``` request.context @@ -192,7 +192,7 @@ request.subgraph.uri.host request.subgraph.uri.path ``` -All of these fields are read/write. +These additional fields are shared across all subgraph invocations and are thus read-only. ### `request.context` @@ -367,7 +367,7 @@ print(`${response.body.label}`); // logs the response label A response may contain data (some responses with errors do not contain data). Be careful when manipulating data (and errors) to make sure that response remain valid. `data` is exposed to Rhai as an [Object Map](https://rhai.rs/book/language/object-maps.html). -There is a complete example of interacting with the response data in the [examples/rhai-data-response-mutate directory](https://github.com/apollographql/router/tree/main/examples/rhai-data-response-mutate). +There is a complete example of interacting with the response data in the [examples/data-response-mutate directory](https://github.com/apollographql/router/tree/main/examples/data-response-mutate). ```rhai print(`${response.body.data}`); // logs the response data @@ -385,7 +385,7 @@ Each Error must contain at least: Optionally, an error may also contain extensions, which are represented as an Object Map. -There is a complete example of interacting with the response errors in the [examples/rhai-error-response-mutate directory](https://github.com/apollographql/router/tree/main/examples/rhai-error-response-mutate). +There is a complete example of interacting with the response errors in the [examples/error-response-mutate directory](https://github.com/apollographql/router/tree/main/examples/error-response-mutate). ```rhai // Create an error with our message diff --git a/docs/source/customizations/rhai.mdx b/docs/source/customizations/rhai.mdx index 079bdb6b99..ed652422d3 100644 --- a/docs/source/customizations/rhai.mdx +++ b/docs/source/customizations/rhai.mdx @@ -301,4 +301,4 @@ If your router customization needs to do any of these, you can instead create a The Apollo Router requires that its Rhai engine implements the [sync feature](https://rhai.rs/book/start/features.html) to guarantee data integrity within the router's multi-threading execution environment. This means that [shared values](https://rhai.rs/book/language/fn-closure.html?highlight=deadlock#data-races-in-sync-builds-can-become-deadlocks) within Rhai might cause a deadlock. -This is particularly risky when using closures within callbacks while referencing external data. Take particular care to avoid this kind of situation by making copies of data when required. 
The [examples/rhai-surrogate-cache-key directory](https://github.com/apollographql/router/tree/main/examples/rhai-surrogate-cache-key) contains a good example of this, where "closing over" `response.headers` would cause a deadlock. To avoid this, a local copy of the required data is obtained and used in the closure. +This is particularly risky when using closures within callbacks while referencing external data. Take particular care to avoid this kind of situation by making copies of data when required. The [examples/surrogate-cache-key directory](https://github.com/apollographql/router/tree/main/examples/surrogate-cache-key) contains a good example of this, where "closing over" `response.headers` would cause a deadlock. To avoid this, a local copy of the required data is obtained and used in the closure. From d4060a98ec9b2d915e21e0a117fe1c7d6a1af0c4 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 21 Nov 2022 19:27:35 +0000 Subject: [PATCH 11/45] fix(deps): update rust crate bytes to 1.3.0 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8bc749f3fd..7239115ca3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -665,9 +665,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" +checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" [[package]] name = "cache-padded" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index d908c7b185..be39c059b4 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -54,7 +54,7 @@ axum = { version = "0.5.17", features = ["headers", "json", "original-uri"] } backtrace = "0.3.66" base64 = "0.13.1" buildstructor = "0.5.1" -bytes = "1.2.1" +bytes = "1.3.0" clap = { version = "3.2.23", default-features = false, features = [ "env", "derive", From 0689ab1aad4def9b22bfb10dd86eee7fada3d60c Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Tue, 22 Nov 2022 11:12:25 +0100 Subject: [PATCH 12/45] chore: update spaceport proto file (#2141) Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- .../src/spaceport/proto/reports.proto | 40 ++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/apollo-router/src/spaceport/proto/reports.proto b/apollo-router/src/spaceport/proto/reports.proto index c7b1453a24..831f3abaf3 100644 --- a/apollo-router/src/spaceport/proto/reports.proto +++ b/apollo-router/src/spaceport/proto/reports.proto @@ -110,7 +110,7 @@ message Trace { // represents a node in the query plan, under which there is a trace tree for that service fetch. // In particular, each fetch node represents a call to an implementing service, and calls to implementing - // services may not be unique. See https://github.com/apollographql/apollo-server/blob/main/packages/apollo-gateway/src/QueryPlan.ts + // services may not be unique. See https://github.com/apollographql/federation/blob/main/query-planner-js/src/QueryPlan.ts // for more information and details. 
message QueryPlanNode { // This represents a set of nodes to be executed sequentially by the Gateway executor @@ -150,6 +150,33 @@ message Trace { repeated ResponsePathElement response_path = 1; QueryPlanNode node = 2; } + + // A `DeferNode` corresponds to one or more @defer at the same level of "nestedness" in the planned query. + message DeferNode { + DeferNodePrimary primary = 1; + repeated DeferredNode deferred = 2; + } + + message ConditionNode { + string condition = 1; + QueryPlanNode if_clause = 2; + QueryPlanNode else_clause = 3; + } + + message DeferNodePrimary { + QueryPlanNode node = 1; + } + message DeferredNode { + repeated DeferredNodeDepends depends = 1; + string label = 2; + ResponsePathElement path = 3; + QueryPlanNode node = 4; + } + message DeferredNodeDepends { + string id = 1; + string defer_label = 2; + } + message ResponsePathElement { oneof id { string field_name = 1; @@ -161,6 +188,8 @@ message Trace { ParallelNode parallel = 2; FetchNode fetch = 3; FlattenNode flatten = 4; + DeferNode defer = 5; + ConditionNode condition = 6; } } @@ -175,6 +204,15 @@ message Trace { // service, including errors. Node root = 14; + // If this is true, the trace is potentially missing some nodes that were + // present on the query plan. This can happen if the trace span buffer used + // in the router fills up and some spans have to be dropped. In these cases + // the overall trace timing will still be correct, but the trace data could + // be missing some referenced or executed fields, and some nodes may be + // missing. If this is true we should display a warning to the user when they + // view the trace in Explorer. + bool is_incomplete = 33; + // ------------------------------------------------------------------------- // Fields below this line are *not* included in federated traces (the traces // sent from federated services to the gateway). From 5412cd93bed9bca6b42e9e2b1326b154f7816ebf Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Tue, 22 Nov 2022 10:44:43 +0000 Subject: [PATCH 13/45] provide multi-arch (amd64/arm64) docker images for the router (#2138) fixes: #1932 fixes: #2098 The next time we release docker images, they will be multi-arch. Co-authored-by: Jesse Rosenberger --- .circleci/config.yml | 17 +++++++++-------- NEXT_CHANGELOG.md | 6 ++++++ 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index c09d662e03..8a2871e648 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -486,16 +486,17 @@ jobs: name: Docker build command: | ROUTER_TAG=ghcr.io/apollographql/router - # Build debug image - docker build --build-arg ROUTER_RELEASE=${VERSION} -f dockerfiles/Dockerfile.router -t ${ROUTER_TAG}:${VERSION}-debug . - # Build release image - docker build --build-arg ROUTER_RELEASE=${VERSION} -f dockerfiles/Dockerfile.router -t ${ROUTER_TAG}:${VERSION} . 
+ # Create a multi-arch builder which works properly under qemu + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + docker context create buildx-build + docker buildx create --driver docker-container --use buildx-build + docker buildx inspect --bootstrap # Note: GH Token owned by apollo-bot2, no expire echo ${GITHUB_OCI_TOKEN} | docker login ghcr.io -u apollo-bot2 --password-stdin - # Push debug image - docker push ${ROUTER_TAG}:${VERSION}-debug - # Push release image - docker push ${ROUTER_TAG}:${VERSION} + # Build and push debug image + docker buildx build --platform linux/amd64,linux/arm64 --push --build-arg ROUTER_RELEASE=${VERSION} -f dockerfiles/Dockerfile.router -t ${ROUTER_TAG}:${VERSION}-debug . + # Build and push release image + docker buildx build --platform linux/amd64,linux/arm64 --push --build-arg ROUTER_RELEASE=${VERSION} -f dockerfiles/Dockerfile.router -t ${ROUTER_TAG}:${VERSION} . - run: name: Helm build command: | diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index f8b89474ea..62868a2d42 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -27,6 +27,12 @@ By [@USERNAME](https://github.com/USERNAME) in https://github.com/apollographql/ ## ❗ BREAKING ❗ ## 🚀 Features +### Provide multi-arch (amd64/arm64) Docker images for the Router ([Issue #1932](https://github.com/apollographql/router/pull/2138)) + +From the next release, our Docker images will be multi-arch. + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2138 + ### Add a supergraph configmap option to the helm chart ([PR #2119](https://github.com/apollographql/router/pull/2119)) Adds the capability to create a configmap containing your supergraph schema. Here's an example of how you could make use of this from your values.yaml and with the `helm` install command. 
From 50b494396d498d29f37b0b2380a5bdc7943d978e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 22 Nov 2022 15:37:56 +0000 Subject: [PATCH 14/45] fix(deps): update all non-major packages >= 1.0 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- dockerfiles/fed2-demo-gateway/package.json | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7239115ca3..2a598d38de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4366,9 +4366,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.88" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8b3801309262e8184d9687fb697586833e939767aea0dda89f5a8e650e8bd7" +checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" dependencies = [ "indexmap", "itoa 1.0.4", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index be39c059b4..e2729dd665 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -156,7 +156,7 @@ shellexpand = "2.1.2" sha2 = "0.10.6" serde = { version = "1.0.147", features = ["derive", "rc"] } serde_json_bytes = { version = "0.2.0", features = ["preserve_order"] } -serde_json = { version = "1.0.88", features = ["preserve_order"] } +serde_json = { version = "1.0.89", features = ["preserve_order"] } serde_urlencoded = "0.7.1" serde_yaml = "0.8.26" static_assertions = "1.1.0" diff --git a/dockerfiles/fed2-demo-gateway/package.json b/dockerfiles/fed2-demo-gateway/package.json index c3f06df776..4a80957a91 100644 --- a/dockerfiles/fed2-demo-gateway/package.json +++ b/dockerfiles/fed2-demo-gateway/package.json @@ -8,7 +8,7 @@ }, "dependencies": { "@apollo/server": "4.1.1", - "@apollo/gateway": "2.2.0", + "@apollo/gateway": "2.2.1", "supergraph-demo-opentelemetry": "0.2.4", "graphql": "16.6.0" }, From 655689a182736148a5be66fc9cb3f8cd6c72c9ea Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Wed, 23 Nov 2022 15:30:37 +0000 Subject: [PATCH 15/45] Fix naming inconsistency of telemetry.metrics.common.attributes.router (#2116) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Fix naming inconsistency of telemetry.metrics.common.attributes.router ([Issue #2076](https://github.com/apollographql/router/issues/2076)) Mirroring the rest of the config `router` should be `supergraph` ```yaml telemetry: metrics: common: attributes: router: # old ``` becomes ```yaml telemetry: metrics: common: attributes: supergraph: # new ``` ### Configuration upgrades ([Issue #2123](https://github.com/apollographql/router/issues/2123)) Occasionally we will make changes to the Router yaml configuration format. When starting the Router if the configuration can be upgraded it will do so automatically and display a warning: ``` 2022-11-22T14:01:46.884897Z WARN router configuration contains deprecated options: 1. telemetry.tracing.trace_config.attributes.router has been renamed to 'supergraph' for consistency These will become errors in the future. Run `router config upgrade ` to see a suggested upgraded configuration. ``` Note: If a configuration has errors after upgrading then the configuration will not be upgraded automatically. From the CLI users can run: * `router config upgrade ` to output configuration that has been upgraded to match the latest config format. * `router config upgrade --diff ` to output a diff e.g. 
``` telemetry: apollo: client_name_header: apollographql-client-name metrics: common: attributes: - router: + supergraph: request: header: - named: "1" # foo ``` There are situations where comments and whitespace are not preserved. This may be improved in future. By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2116 ### CLI structure changes ([Issue #2123](https://github.com/apollographql/router/issues/2123)) As the Router gains functionality the limitations of the current CLI structure are becoming apparent. There is now a separate subcommand for config related operations: * `config` * `schema` - Output the configuration schema * `upgrade` - Upgrade the configuration with optional diff support. `router --schema` has been deprecated and users should move to `router config schema`. Co-authored-by: bryn --- Cargo.lock | 102 ++++ NEXT_CHANGELOG.md | 70 +++ README.md | 37 +- apollo-router/Cargo.toml | 6 +- apollo-router/src/configuration/expansion.rs | 4 +- .../0001-telemetry_router_to_supergraph.yaml | 6 + .../src/configuration/migrations/README.md | 55 ++ apollo-router/src/configuration/mod.rs | 8 +- apollo-router/src/configuration/schema.rs | 28 +- .../snapshots/.skipconfigvalidation | 0 ...nfiguration__tests__schema_generation.snap | 502 +++++++++--------- ...etry_router_to_supergraph.router.yaml.snap | 14 + ...on__upgrade__test__copy_array_element.snap | 19 + ...figuration__upgrade__test__copy_field.snap | 19 + ...__upgrade__test__delete_array_element.snap | 13 + ...guration__upgrade__test__delete_field.snap | 13 + ...n__upgrade__test__diff_upgrade_output.snap | 10 + ...on__upgrade__test__move_array_element.snap | 18 + ...figuration__upgrade__test__move_field.snap | 18 + ...ration__upgrade__test__upgrade_output.snap | 8 + .../testdata/migrations/.skipconfigvalidation | 0 ...telemetry_router_to_supergraph.router.yaml | 8 + apollo-router/src/configuration/tests.rs | 66 ++- apollo-router/src/configuration/upgrade.rs | 325 ++++++++++++ apollo-router/src/executable.rs | 86 ++- .../src/plugins/telemetry/metrics/mod.rs | 2 +- apollo-router/src/plugins/telemetry/mod.rs | 10 +- docs/source/configuration/metrics.mdx | 2 +- docs/source/configuration/overview.mdx | 68 ++- 29 files changed, 1230 insertions(+), 287 deletions(-) create mode 100644 apollo-router/src/configuration/migrations/0001-telemetry_router_to_supergraph.yaml create mode 100644 apollo-router/src/configuration/migrations/README.md create mode 100644 apollo-router/src/configuration/snapshots/.skipconfigvalidation create mode 100644 apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@telemetry_router_to_supergraph.router.yaml.snap create mode 100644 apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__copy_array_element.snap create mode 100644 apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__copy_field.snap create mode 100644 apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__delete_array_element.snap create mode 100644 apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__delete_field.snap create mode 100644 apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__diff_upgrade_output.snap create mode 100644 apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__move_array_element.snap create mode 100644 
apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__move_field.snap create mode 100644 apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__upgrade_output.snap create mode 100644 apollo-router/src/configuration/testdata/migrations/.skipconfigvalidation create mode 100644 apollo-router/src/configuration/testdata/migrations/telemetry_router_to_supergraph.router.yaml create mode 100644 apollo-router/src/configuration/upgrade.rs diff --git a/Cargo.lock b/Cargo.lock index 2a598d38de..077f8eba72 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -165,6 +165,7 @@ dependencies = [ "derivative", "derive_more", "dhat", + "diff", "directories", "displaydoc", "flate2", @@ -208,6 +209,7 @@ dependencies = [ "prometheus", "prost 0.9.0", "prost-types 0.9.0", + "proteus", "rand", "redis", "redis_cluster_async", @@ -215,6 +217,7 @@ dependencies = [ "reqwest", "rhai", "router-bridge", + "rust-embed", "schemars", "serde", "serde_json", @@ -1377,6 +1380,12 @@ dependencies = [ "tempfile", ] +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + [[package]] name = "difflib" version = "0.4.0" @@ -1803,6 +1812,17 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "ghost" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb19fe8de3ea0920d282f7b77dd4227aea6b8b999b42cdf0ca41b2472b14443a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "gimli" version = "0.26.2" @@ -2315,6 +2335,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "inventory" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84344c6e0b90a9e2b6f3f9abe5cc74402684e348df7b32adca28747e0cef091a" +dependencies = [ + "ctor", + "ghost", +] + [[package]] name = "ipnet" version = "2.5.1" @@ -3657,6 +3687,20 @@ dependencies = [ "prost 0.11.2", ] +[[package]] +name = "proteus" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "279396105537894fdecabfba63493bc93192c94a97951bef640d2feac3cfc362" +dependencies = [ + "once_cell", + "regex", + "serde", + "serde_json", + "thiserror", + "typetag", +] + [[package]] name = "protobuf" version = "2.28.0" @@ -4063,6 +4107,40 @@ dependencies = [ "zeroize", ] +[[package]] +name = "rust-embed" +version = "6.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "283ffe2f866869428c92e0d61c2f35dfb4355293cdfdc48f49e895c15f1333d1" +dependencies = [ + "rust-embed-impl", + "rust-embed-utils", + "walkdir 2.3.2", +] + +[[package]] +name = "rust-embed-impl" +version = "6.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31ab23d42d71fb9be1b643fe6765d292c5e14d46912d13f3ae2815ca048ea04d" +dependencies = [ + "proc-macro2", + "quote", + "rust-embed-utils", + "syn", + "walkdir 2.3.2", +] + +[[package]] +name = "rust-embed-utils" +version = "7.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1669d81dfabd1b5f8e2856b8bbe146c6192b0ba22162edc738ac0a5de18f054" +dependencies = [ + "sha2", + "walkdir 2.3.2", +] + [[package]] name = "rustc-demangle" version = "0.1.21" @@ -5449,6 +5527,30 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +[[package]] +name = 
"typetag" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4080564c5b2241b5bff53ab610082234e0c57b0417f4bd10596f183001505b8a" +dependencies = [ + "erased-serde", + "inventory", + "once_cell", + "serde", + "typetag-impl", +] + +[[package]] +name = "typetag-impl" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e60147782cc30833c05fba3bab1d9b5771b2685a2557672ac96fa5d154099c0e" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "ucd-trie" version = "0.1.5" diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 62868a2d42..8885cd66e2 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -25,6 +25,39 @@ By [@USERNAME](https://github.com/USERNAME) in https://github.com/apollographql/ # [x.x.x] (unreleased) - 2022-mm-dd ## ❗ BREAKING ❗ +### Fix naming inconsistency of telemetry.metrics.common.attributes.router ([Issue #2076](https://github.com/apollographql/router/issues/2076)) + +Mirroring the rest of the config `router` should be `supergraph` + +```yaml +telemetry: + metrics: + common: + attributes: + router: # old +``` +becomes +```yaml +telemetry: + metrics: + common: + attributes: + supergraph: # new +``` + +By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2116 + +### CLI structure changes ([Issue #2123](https://github.com/apollographql/router/issues/2123)) + +As the Router gains functionality the limitations of the current CLI structure are becoming apparent. + +There is now a separate subcommand for config related operations: +* `config` + * `schema` - Output the configuration schema + * `upgrade` - Upgrade the configuration with optional diff support. + +`router --schema` has been deprecated and users should move to `router config schema`. + ## 🚀 Features ### Provide multi-arch (amd64/arm64) Docker images for the Router ([Issue #1932](https://github.com/apollographql/router/pull/2138)) @@ -64,6 +97,43 @@ helm upgrade --install --create-namespace --namespace router-test --set-file sup By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2119 +### Configuration upgrades ([Issue #2123](https://github.com/apollographql/router/issues/2123)) + +Occasionally we will make changes to the Router yaml configuration format. +When starting the Router if the configuration can be upgraded it will do so automatically and display a warning: + +``` +2022-11-22T14:01:46.884897Z WARN router configuration contains deprecated options: + + 1. telemetry.tracing.trace_config.attributes.router has been renamed to 'supergraph' for consistency + +These will become errors in the future. Run `router config upgrade ` to see a suggested upgraded configuration. +``` + +Note: If a configuration has errors after upgrading then the configuration will not be upgraded automatically. + +From the CLI users can run: +* `router config upgrade ` to output configuration that has been upgraded to match the latest config format. +* `router config upgrade --diff ` to output a diff e.g. +``` + telemetry: + apollo: + client_name_header: apollographql-client-name + metrics: + common: + attributes: +- router: ++ supergraph: + request: + header: + - named: "1" # foo +``` + +There are situations where comments and whitespace are not preserved. This may be improved in future. 
+ +By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2116 + + ## 🐛 Fixes ### Improve errors when subgraph returns non-GraphQL response with a non-2xx status code ([Issue #2117](https://github.com/apollographql/router/issues/2117)) diff --git a/README.md b/README.md index f529b39272..5a15025f1f 100644 --- a/README.md +++ b/README.md @@ -26,11 +26,40 @@ specified via flag, either by an absolute path, or a path relative to the curren directory. ``` +USAGE: + router [OPTIONS] [SUBCOMMAND] + OPTIONS: - -c, --config Configuration file location - -s, --supergraph Supergraph Schema location - --hr, --hot-reload Watches for changes in the supergraph and configuration file - --schema Prints out a JSON schema of the configuration file + --apollo-uplink-endpoints + The endpoints (comma separated) polled to fetch the latest supergraph schema [env: + APOLLO_UPLINK_ENDPOINTS=] + + --apollo-uplink-poll-interval + The time between polls to Apollo uplink. Minimum 10s [env: APOLLO_UPLINK_POLL_INTERVAL=] + [default: 10s] + + -c, --config + Configuration location relative to the project directory [env: + APOLLO_ROUTER_CONFIG_PATH=] + + -h, --help + Print help information + + --hot-reload + Reload configuration and schema files automatically [env: APOLLO_ROUTER_HOT_RELOAD=] + + --log + Log level (off|error|warn|info|debug|trace) [env: APOLLO_ROUTER_LOG=] [default: info] + + -s, --supergraph + Schema location relative to the project directory [env: APOLLO_ROUTER_SUPERGRAPH_PATH=] + + -V, --version + Display version and exit + +SUBCOMMANDS: + config Configuration subcommands + help Print this message or the help of the given subcommand(s) ``` ## Who is Apollo? diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index e2729dd665..4434729778 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -70,6 +70,7 @@ derive_more = { version = "0.99.17", default-features = false, features = [ "display", ] } dhat = { version = "0.3.2", optional = true } +diff = "0.1.13" directories = "4.0.1" displaydoc = "0.2" flate2 = "1.0.24" @@ -84,6 +85,7 @@ hyper = { version = "0.14.23", features = ["server", "client"] } hyper-rustls = { version = "0.23.1", features = ["http1", "http2"] } indexmap = { version = "1.9.2", features = ["serde-1"] } itertools = "0.10.5" +jsonpath_lib = "0.3.0" jsonschema = { version = "0.16.1", default-features = false } lazy_static = "1.4.0" libc = "0.2.137" @@ -140,6 +142,7 @@ pin-project-lite = "0.2.9" prometheus = "0.13" prost = "0.9.0" prost-types = "0.9.0" +proteus = "0.5.0" rand = "0.8.5" rhai = { version = "1.11.0", features = ["sync", "serde", "internals"] } redis = { version = "0.21.6", optional = true, features = ["cluster", "tokio-comp"] } @@ -151,6 +154,7 @@ reqwest = { version = "0.11.13", default-features = false, features = [ "stream", ] } router-bridge = "0.1.11" +rust-embed="6.4.2" schemars = { version = "0.8.11", features = ["url"] } shellexpand = "2.1.2" sha2 = "0.10.6" @@ -193,7 +197,6 @@ tracing-core = "=0.1.26" tracing-futures = { version = "0.2.5", features = ["futures-03"] } tracing-opentelemetry = "0.17.4" tracing-subscriber = { version = "0.3.11", features = ["env-filter", "json"] } - url = { version = "2.3.1", features = ["serde"] } urlencoding = "2.1.2" uuid = { version = "1.2.2", features = ["serde", "v4"] } @@ -209,7 +212,6 @@ uname = "0.1.1" [dev-dependencies] insta = { version = "1.21.1", features = ["json", "redactions"] } introspector-gadget = "0.1.0" -jsonpath_lib = "0.3.0" maplit = "1.0.2" memchr = 
{ version = "2.5.0", default-features = false } mockall = "0.11.3" diff --git a/apollo-router/src/configuration/expansion.rs b/apollo-router/src/configuration/expansion.rs index 4414d4c6c6..0ceed33868 100644 --- a/apollo-router/src/configuration/expansion.rs +++ b/apollo-router/src/configuration/expansion.rs @@ -84,10 +84,10 @@ impl Expansion { pub(crate) fn expand_env_variables( configuration: &serde_json::Value, - expansion: Expansion, + expansion: &Expansion, ) -> Result { let mut configuration = configuration.clone(); - visit(&mut configuration, &expansion)?; + visit(&mut configuration, expansion)?; Ok(configuration) } diff --git a/apollo-router/src/configuration/migrations/0001-telemetry_router_to_supergraph.yaml b/apollo-router/src/configuration/migrations/0001-telemetry_router_to_supergraph.yaml new file mode 100644 index 0000000000..7061a07d2a --- /dev/null +++ b/apollo-router/src/configuration/migrations/0001-telemetry_router_to_supergraph.yaml @@ -0,0 +1,6 @@ +description: telemetry.tracing.trace_config.attributes.router has been renamed to 'supergraph' for consistency +actions: + - type: move + from: telemetry.metrics.common.attributes.router + to: telemetry.metrics.common.attributes.supergraph + diff --git a/apollo-router/src/configuration/migrations/README.md b/apollo-router/src/configuration/migrations/README.md new file mode 100644 index 0000000000..494831e27b --- /dev/null +++ b/apollo-router/src/configuration/migrations/README.md @@ -0,0 +1,55 @@ +# Configuration migrations +This directory contains configuration migrations that can be applied to a router to a router config to bring it up to date with current config format. + +It uses [proteus](https://github.com/rust-playground/proteus) under the hood, which handles the complexities of merging Json. + +A migration has the following format: + +The filename should begin with a 5 digit numerical prefix. This allows us to apply migrations in a deterministic order. +`Filename: 00001-name.yaml` + +The yaml consists of a description and a number of actions: +```yaml +description: telemetry.tracing.trace_config.attributes.router has been renamed to 'supergraph' for consistency +actions: + - type: move + from: some.source + to: some.destination + - type: copy + from: some.source + to: some.destination + - type: delete + path: some.destination +``` + +Each action is applied in order. Use the following formats for from, to and path. + +## Getter (from) +| syntax | description | +---------|-------------| +| | this will grab the top-level value which could be any valid type: Object, array, ... | +| id | Gets a JSON Object's name. eg. key in HashMap | +| [0] | Gets a JSON Arrays index at the specified index. | +| profile.first_name | Combine Object names with dot notation. | +| profile.address[0].street | Combinations using dot notation and indexes is also supported. | + +## Setter (to, path) +| syntax | description | +---------|-------------| +| | this will set the top-level value in the destination | +| id | By itself any text is considered to be a JSON Object's name. | +| [] | This appends the source **data** to an array, creating it if it doesn't exist and is only valid at the end of set syntax eg. profile.address[] | +| [\+] | The source Array should append all of it's values into the destination Array and is only valid at the end of set syntax eg. profile.address[] | +| [\-] | The source Array values should replace the destination Array's values at the overlapping indexes and is only valid at the end of set syntax eg. 
profile.address[] | +| {} | This merges the supplied Object overtop of the existing and is only valid at the end of set syntax eg. profile{} | +| profile.first_name | Combine Object names with dot notation. | +| profile.address[0].street | Combinations using dot notation and indexes is also supported. | + +See [proteus](https://github.com/rust-playground/proteus) for more options. + +If a migration is deemed to have changed the configuration then the description of the migration will be output to the user as a warning. + +In future we will be able to use these files to support offline migrations. + +# Testing +Once you have made a new migration place a config file in `testdata/migrations`. It will automatically be picked up by the `upgrade_old_configuration` test. diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index 22e5d6ebfe..7819360d3b 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -5,6 +5,7 @@ mod expansion; mod schema; #[cfg(test)] mod tests; +mod upgrade; mod yaml; use std::fmt; @@ -20,6 +21,7 @@ use displaydoc::Display; use expansion::*; use itertools::Itertools; pub(crate) use schema::generate_config_schema; +pub(crate) use schema::generate_upgrade; use schemars::gen::SchemaGenerator; use schemars::schema::ObjectValidation; use schemars::schema::Schema; @@ -32,6 +34,7 @@ use serde_json::Map; use serde_json::Value; use thiserror::Error; +use crate::configuration::schema::Mode; use crate::executable::APOLLO_ROUTER_DEV_ENV; use crate::plugin::plugins; @@ -60,6 +63,9 @@ pub enum ConfigurationError { /// APOLLO_ROUTER_CONFIG_SUPPORTED_MODES must be of the format env,file,... Possible modes are 'env' and 'file'. InvalidExpansionModeConfig, + + /// could not migrate configuration: {error}. + MigrationFailure { error: String }, } /// The configuration for the router. @@ -353,7 +359,7 @@ impl FromStr for Configuration { type Err = ConfigurationError; fn from_str(s: &str) -> Result { - schema::validate_yaml_configuration(s, Expansion::default()?)?.validate() + schema::validate_yaml_configuration(s, Expansion::default()?, Mode::Upgrade)?.validate() } } diff --git a/apollo-router/src/configuration/schema.rs b/apollo-router/src/configuration/schema.rs index 258f3fdb4c..1a737b57ec 100644 --- a/apollo-router/src/configuration/schema.rs +++ b/apollo-router/src/configuration/schema.rs @@ -18,6 +18,8 @@ use super::yaml; use super::Configuration; use super::ConfigurationError; use super::APOLLO_PLUGIN_PREFIX; +pub(crate) use crate::configuration::upgrade::generate_upgrade; +pub(crate) use crate::configuration::upgrade::upgrade_configuration; /// Generate a JSON schema for the configuration. pub(crate) fn generate_config_schema() -> RootSchema { @@ -37,6 +39,15 @@ pub(crate) fn generate_config_schema() -> RootSchema { schema } +#[derive(Eq, PartialEq)] +pub(crate) enum Mode { + Upgrade, + + // This is used only in testing to ensure that we don't allow old config in our tests. + #[cfg(test)] + NoUpgrade, +} + /// Validate config yaml against the generated json schema. /// This is a tricky problem, and the solution here is by no means complete. /// In the case that validation cannot be performed then it will let serde validate as normal. 
The @@ -54,6 +65,7 @@ pub(crate) fn generate_config_schema() -> RootSchema { pub(crate) fn validate_yaml_configuration( raw_yaml: &str, expansion: Expansion, + migration: Mode, ) -> Result { let defaulted_yaml = if raw_yaml.trim().is_empty() { "plugins:".to_string() @@ -61,14 +73,12 @@ pub(crate) fn validate_yaml_configuration( raw_yaml.to_string() }; - let yaml = &serde_yaml::from_str(&defaulted_yaml).map_err(|e| { + let mut yaml = serde_yaml::from_str(&defaulted_yaml).map_err(|e| { ConfigurationError::InvalidConfiguration { message: "failed to parse yaml", error: e.to_string(), } })?; - - let expanded_yaml = expand_env_variables(yaml, expansion)?; let schema = serde_json::to_value(generate_config_schema()).map_err(|e| { ConfigurationError::InvalidConfiguration { message: "failed to parse schema", @@ -82,6 +92,18 @@ pub(crate) fn validate_yaml_configuration( message: "failed to compile schema", error: e.to_string(), })?; + + if migration == Mode::Upgrade { + let upgraded = upgrade_configuration(&yaml, true)?; + let expanded_yaml = expand_env_variables(&upgraded, &expansion)?; + if schema.validate(&expanded_yaml).is_ok() { + yaml = upgraded; + } else { + tracing::warn!("configuration could not be upgraded automatically as it had errors") + } + } + let expanded_yaml = expand_env_variables(&yaml, &expansion)?; + if let Err(errors) = schema.validate(&expanded_yaml) { // Validation failed, translate the errors into something nice for the user // We have to reparse the yaml to get the line number information for each error. diff --git a/apollo-router/src/configuration/snapshots/.skipconfigvalidation b/apollo-router/src/configuration/snapshots/.skipconfigvalidation new file mode 100644 index 0000000000..e69de29bb2 diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 8a249cfcc5..971e0c6891 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -797,257 +797,6 @@ expression: "&schema" "description": "Configuration to add custom labels/attributes to metrics", "type": "object", "properties": { - "router": { - "description": "Configuration to forward header values or body values from router request/response in metric attributes/labels", - "type": "object", - "properties": { - "context": { - "description": "Configuration to forward values from the context to custom attributes/labels in metrics", - "type": "array", - "items": { - "description": "Configuration to forward context values in metric attributes/labels", - "type": "object", - "required": [ - "named" - ], - "properties": { - "default": { - "type": "string", - "nullable": true - }, - "named": { - "type": "string" - }, - "rename": { - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - "nullable": true - }, - "errors": { - "description": "Configuration to forward values from the error to custom attributes/labels in metrics", - "type": "object", - "properties": { - "extensions": { - "description": "Forward extensions values as custom attributes/labels in metrics", - "type": "array", - "items": { - "description": "Configuration to forward body values in metric attributes/labels", - "type": "object", - "required": [ - "name", - "path" - ], - "properties": { - "default": { - 
"type": "string", - "nullable": true - }, - "name": { - "type": "string" - }, - "path": { - "type": "string" - } - }, - "additionalProperties": false - }, - "nullable": true - }, - "include_messages": { - "description": "Will include the error message in a \"message\" attribute", - "default": false, - "type": "boolean" - } - }, - "additionalProperties": false, - "nullable": true - }, - "request": { - "description": "Configuration to forward headers or body values from the request to custom attributes/labels in metrics", - "type": "object", - "properties": { - "body": { - "description": "Forward body values as custom attributes/labels in metrics", - "type": "array", - "items": { - "description": "Configuration to forward body values in metric attributes/labels", - "type": "object", - "required": [ - "name", - "path" - ], - "properties": { - "default": { - "type": "string", - "nullable": true - }, - "name": { - "type": "string" - }, - "path": { - "type": "string" - } - }, - "additionalProperties": false - }, - "nullable": true - }, - "header": { - "description": "Forward header values as custom attributes/labels in metrics", - "type": "array", - "items": { - "description": "Configuration to forward header values in metric labels", - "anyOf": [ - { - "description": "Using a named header", - "type": "object", - "required": [ - "named" - ], - "properties": { - "default": { - "type": "string", - "nullable": true - }, - "named": { - "type": "string" - }, - "rename": { - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - { - "description": "Using a regex on the header name", - "type": "object", - "required": [ - "matching" - ], - "properties": { - "matching": { - "type": "string" - } - }, - "additionalProperties": false - } - ] - }, - "nullable": true - } - }, - "additionalProperties": false, - "nullable": true - }, - "response": { - "description": "Configuration to forward headers or body values from the response to custom attributes/labels in metrics", - "type": "object", - "properties": { - "body": { - "description": "Forward body values as custom attributes/labels in metrics", - "type": "array", - "items": { - "description": "Configuration to forward body values in metric attributes/labels", - "type": "object", - "required": [ - "name", - "path" - ], - "properties": { - "default": { - "type": "string", - "nullable": true - }, - "name": { - "type": "string" - }, - "path": { - "type": "string" - } - }, - "additionalProperties": false - }, - "nullable": true - }, - "header": { - "description": "Forward header values as custom attributes/labels in metrics", - "type": "array", - "items": { - "description": "Configuration to forward header values in metric labels", - "anyOf": [ - { - "description": "Using a named header", - "type": "object", - "required": [ - "named" - ], - "properties": { - "default": { - "type": "string", - "nullable": true - }, - "named": { - "type": "string" - }, - "rename": { - "type": "string", - "nullable": true - } - }, - "additionalProperties": false - }, - { - "description": "Using a regex on the header name", - "type": "object", - "required": [ - "matching" - ], - "properties": { - "matching": { - "type": "string" - } - }, - "additionalProperties": false - } - ] - }, - "nullable": true - } - }, - "additionalProperties": false, - "nullable": true - }, - "static": { - "description": "Configuration to insert custom attributes/labels in metrics", - "type": "array", - "items": { - "description": "Configuration to insert custom 
attributes/labels in metrics", - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "type": "string" - }, - "value": { - "type": "string" - } - }, - "additionalProperties": false - }, - "nullable": true - } - }, - "additionalProperties": false, - "nullable": true - }, "subgraph": { "description": "Configuration to forward header values or body values from subgraph request/response in metric attributes/labels", "type": "object", @@ -1558,6 +1307,257 @@ expression: "&schema" }, "additionalProperties": false, "nullable": true + }, + "supergraph": { + "description": "Configuration to forward header values or body values from router request/response in metric attributes/labels", + "type": "object", + "properties": { + "context": { + "description": "Configuration to forward values from the context to custom attributes/labels in metrics", + "type": "array", + "items": { + "description": "Configuration to forward context values in metric attributes/labels", + "type": "object", + "required": [ + "named" + ], + "properties": { + "default": { + "type": "string", + "nullable": true + }, + "named": { + "type": "string" + }, + "rename": { + "type": "string", + "nullable": true + } + }, + "additionalProperties": false + }, + "nullable": true + }, + "errors": { + "description": "Configuration to forward values from the error to custom attributes/labels in metrics", + "type": "object", + "properties": { + "extensions": { + "description": "Forward extensions values as custom attributes/labels in metrics", + "type": "array", + "items": { + "description": "Configuration to forward body values in metric attributes/labels", + "type": "object", + "required": [ + "name", + "path" + ], + "properties": { + "default": { + "type": "string", + "nullable": true + }, + "name": { + "type": "string" + }, + "path": { + "type": "string" + } + }, + "additionalProperties": false + }, + "nullable": true + }, + "include_messages": { + "description": "Will include the error message in a \"message\" attribute", + "default": false, + "type": "boolean" + } + }, + "additionalProperties": false, + "nullable": true + }, + "request": { + "description": "Configuration to forward headers or body values from the request to custom attributes/labels in metrics", + "type": "object", + "properties": { + "body": { + "description": "Forward body values as custom attributes/labels in metrics", + "type": "array", + "items": { + "description": "Configuration to forward body values in metric attributes/labels", + "type": "object", + "required": [ + "name", + "path" + ], + "properties": { + "default": { + "type": "string", + "nullable": true + }, + "name": { + "type": "string" + }, + "path": { + "type": "string" + } + }, + "additionalProperties": false + }, + "nullable": true + }, + "header": { + "description": "Forward header values as custom attributes/labels in metrics", + "type": "array", + "items": { + "description": "Configuration to forward header values in metric labels", + "anyOf": [ + { + "description": "Using a named header", + "type": "object", + "required": [ + "named" + ], + "properties": { + "default": { + "type": "string", + "nullable": true + }, + "named": { + "type": "string" + }, + "rename": { + "type": "string", + "nullable": true + } + }, + "additionalProperties": false + }, + { + "description": "Using a regex on the header name", + "type": "object", + "required": [ + "matching" + ], + "properties": { + "matching": { + "type": "string" + } + }, + "additionalProperties": false + } + ] + }, 
+ "nullable": true + } + }, + "additionalProperties": false, + "nullable": true + }, + "response": { + "description": "Configuration to forward headers or body values from the response to custom attributes/labels in metrics", + "type": "object", + "properties": { + "body": { + "description": "Forward body values as custom attributes/labels in metrics", + "type": "array", + "items": { + "description": "Configuration to forward body values in metric attributes/labels", + "type": "object", + "required": [ + "name", + "path" + ], + "properties": { + "default": { + "type": "string", + "nullable": true + }, + "name": { + "type": "string" + }, + "path": { + "type": "string" + } + }, + "additionalProperties": false + }, + "nullable": true + }, + "header": { + "description": "Forward header values as custom attributes/labels in metrics", + "type": "array", + "items": { + "description": "Configuration to forward header values in metric labels", + "anyOf": [ + { + "description": "Using a named header", + "type": "object", + "required": [ + "named" + ], + "properties": { + "default": { + "type": "string", + "nullable": true + }, + "named": { + "type": "string" + }, + "rename": { + "type": "string", + "nullable": true + } + }, + "additionalProperties": false + }, + { + "description": "Using a regex on the header name", + "type": "object", + "required": [ + "matching" + ], + "properties": { + "matching": { + "type": "string" + } + }, + "additionalProperties": false + } + ] + }, + "nullable": true + } + }, + "additionalProperties": false, + "nullable": true + }, + "static": { + "description": "Configuration to insert custom attributes/labels in metrics", + "type": "array", + "items": { + "description": "Configuration to insert custom attributes/labels in metrics", + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "additionalProperties": false + }, + "nullable": true + } + }, + "additionalProperties": false, + "nullable": true } }, "additionalProperties": false, diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@telemetry_router_to_supergraph.router.yaml.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@telemetry_router_to_supergraph.router.yaml.snap new file mode 100644 index 0000000000..1db53b0e56 --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__upgrade_old_configuration@telemetry_router_to_supergraph.router.yaml.snap @@ -0,0 +1,14 @@ +--- +source: apollo-router/src/configuration/tests.rs +expression: new_config +--- +--- +telemetry: + metrics: + common: + attributes: + supergraph: + request: + header: + - named: fd + diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__copy_array_element.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__copy_array_element.snap new file mode 100644 index 0000000000..7455fbbda0 --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__copy_array_element.snap @@ -0,0 +1,19 @@ +--- +source: apollo-router/src/configuration/upgrade.rs +expression: "apply_migration(&source_doc(),\n &Migration::builder().action(Action::Copy {\n from: \"arr[0]\".to_string(),\n to: \"new.arr[0]\".to_string(),\n }).description(\"copy arr[0]\").build()).expect(\"expected 
successful migration\")" +--- +{ + "obj": { + "field1": 1, + "field2": 2 + }, + "arr": [ + "v1", + "v2" + ], + "new": { + "arr": [ + "v1" + ] + } +} diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__copy_field.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__copy_field.snap new file mode 100644 index 0000000000..029e4e814a --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__copy_field.snap @@ -0,0 +1,19 @@ +--- +source: apollo-router/src/configuration/upgrade.rs +expression: "apply_migration(&source_doc(),\n &Migration::builder().action(Action::Copy {\n from: \"obj.field1\".to_string(),\n to: \"new.obj.field1\".to_string(),\n }).description(\"copy field1\").build()).expect(\"expected successful migration\")" +--- +{ + "obj": { + "field1": 1, + "field2": 2 + }, + "arr": [ + "v1", + "v2" + ], + "new": { + "obj": { + "field1": 1 + } + } +} diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__delete_array_element.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__delete_array_element.snap new file mode 100644 index 0000000000..850c1bb543 --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__delete_array_element.snap @@ -0,0 +1,13 @@ +--- +source: apollo-router/src/configuration/upgrade.rs +expression: "apply_migration(&source_doc(),\n &Migration::builder().action(Action::Delete {\n path: \"arr[0]\".to_string(),\n }).description(\"delete arr[0]\").build()).expect(\"expected successful migration\")" +--- +{ + "obj": { + "field1": 1, + "field2": 2 + }, + "arr": [ + "v2" + ] +} diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__delete_field.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__delete_field.snap new file mode 100644 index 0000000000..d3e1045312 --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__delete_field.snap @@ -0,0 +1,13 @@ +--- +source: apollo-router/src/configuration/upgrade.rs +expression: "apply_migration(&source_doc(),\n &Migration::builder().action(Action::Delete {\n path: \"obj.field1\".to_string(),\n }).description(\"delete field1\").build()).expect(\"expected successful migration\")" +--- +{ + "obj": { + "field2": 2 + }, + "arr": [ + "v1", + "v2" + ] +} diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__diff_upgrade_output.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__diff_upgrade_output.snap new file mode 100644 index 0000000000..7d35fea46b --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__diff_upgrade_output.snap @@ -0,0 +1,10 @@ +--- +source: apollo-router/src/configuration/upgrade.rs +expression: "generate_upgrade_output(\"changed: bar\\nstable: 1.0\\ndeleted: gone\",\n \"changed: bif\\nstable: 1.0\\nadded: new\",\n true).expect(\"expected successful migration\")" +--- +-changed: bar ++changed: bif + stable: 1.0 +-deleted: gone ++added: new + diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__move_array_element.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__move_array_element.snap new file 
mode 100644 index 0000000000..f8e479e750 --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__move_array_element.snap @@ -0,0 +1,18 @@ +--- +source: apollo-router/src/configuration/upgrade.rs +expression: "apply_migration(&source_doc(),\n &Migration::builder().action(Action::Move {\n from: \"arr[0]\".to_string(),\n to: \"new.arr[0]\".to_string(),\n }).description(\"move arr[0]\").build()).expect(\"expected successful migration\")" +--- +{ + "obj": { + "field1": 1, + "field2": 2 + }, + "arr": [ + "v2" + ], + "new": { + "arr": [ + "v1" + ] + } +} diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__move_field.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__move_field.snap new file mode 100644 index 0000000000..e8136fb407 --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__move_field.snap @@ -0,0 +1,18 @@ +--- +source: apollo-router/src/configuration/upgrade.rs +expression: "apply_migration(&source_doc(),\n &Migration::builder().action(Action::Move {\n from: \"obj.field1\".to_string(),\n to: \"new.obj.field1\".to_string(),\n }).description(\"move field1\").build()).expect(\"expected successful migration\")" +--- +{ + "obj": { + "field2": 2 + }, + "arr": [ + "v1", + "v2" + ], + "new": { + "obj": { + "field1": 1 + } + } +} diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__upgrade_output.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__upgrade_output.snap new file mode 100644 index 0000000000..2f1e6fab76 --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__upgrade_output.snap @@ -0,0 +1,8 @@ +--- +source: apollo-router/src/configuration/upgrade.rs +expression: "generate_upgrade_output(\"changed: bar\\nstable: 1.0\\ndeleted: gone\",\n \"changed: bif\\nstable: 1.0\\nadded: new\",\n false).expect(\"expected successful migration\")" +--- +changed: bif +stable: 1.0 +added: new + diff --git a/apollo-router/src/configuration/testdata/migrations/.skipconfigvalidation b/apollo-router/src/configuration/testdata/migrations/.skipconfigvalidation new file mode 100644 index 0000000000..e69de29bb2 diff --git a/apollo-router/src/configuration/testdata/migrations/telemetry_router_to_supergraph.router.yaml b/apollo-router/src/configuration/testdata/migrations/telemetry_router_to_supergraph.router.yaml new file mode 100644 index 0000000000..0e44d606a6 --- /dev/null +++ b/apollo-router/src/configuration/testdata/migrations/telemetry_router_to_supergraph.router.yaml @@ -0,0 +1,8 @@ +telemetry: + metrics: + common: + attributes: + router: + request: + header: + - named: "fd" diff --git a/apollo-router/src/configuration/tests.rs b/apollo-router/src/configuration/tests.rs index c05a8e6434..b2fb1733dd 100644 --- a/apollo-router/src/configuration/tests.rs +++ b/apollo-router/src/configuration/tests.rs @@ -6,6 +6,7 @@ use http::Uri; #[cfg(unix)] use insta::assert_json_snapshot; use regex::Regex; +use rust_embed::RustEmbed; #[cfg(unix)] use schemars::gen::SchemaSettings; use walkdir::DirEntry; @@ -166,6 +167,7 @@ subgraphs: account: true "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect_err("should have resulted in an error"); assert_eq!(error.to_string(), String::from("unknown fields: additional properties are not allowed ('subgraphs' was/were unexpected)")); @@ 
-179,6 +181,7 @@ unknown: foo: true "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect_err("should have resulted in an error"); assert_eq!( @@ -195,6 +198,7 @@ fn empty_config() { r#" "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect("should have been ok with an empty config"); } @@ -221,6 +225,7 @@ telemetry: another_non_existant: 3 "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect_err("should have resulted in an error"); insta::assert_snapshot!(error.to_string()); @@ -238,6 +243,7 @@ supergraph: another_one: true "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect_err("should have resulted in an error"); insta::assert_snapshot!(error.to_string()); @@ -253,6 +259,7 @@ supergraph: listen: true "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect_err("should have resulted in an error"); insta::assert_snapshot!(error.to_string()); @@ -270,6 +277,7 @@ cors: allow_headers: [ Content-Type, 5 ] "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect_err("should have resulted in an error"); insta::assert_snapshot!(error.to_string()); @@ -289,6 +297,7 @@ cors: - 5 "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect_err("should have resulted in an error"); insta::assert_snapshot!(error.to_string()); @@ -303,6 +312,7 @@ cors: allow_headers: [ "*" ] "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect("should not have resulted in an error"); let error = cfg @@ -321,6 +331,7 @@ cors: methods: [ GET, "*" ] "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect("should not have resulted in an error"); let error = cfg @@ -339,6 +350,7 @@ cors: allow_any_origin: true "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect("should not have resulted in an error"); let error = cfg @@ -401,7 +413,11 @@ fn validate_project_config_files() { }; for yaml in yamls { - if let Err(e) = validate_yaml_configuration(&yaml, Expansion::default().unwrap()) { + if let Err(e) = validate_yaml_configuration( + &yaml, + Expansion::default().unwrap(), + Mode::NoUpgrade, + ) { panic!( "{} configuration error: \n{}", entry.path().to_string_lossy(), @@ -422,6 +438,7 @@ supergraph: introspection: ${env.TEST_CONFIG_NUMERIC_ENV_UNIQUE:-true} "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect_err("Must have an error because we expect a boolean"); insta::assert_snapshot!(error.to_string()); @@ -440,6 +457,7 @@ cors: allow_headers: [ Content-Type, "${env.TEST_CONFIG_NUMERIC_ENV_UNIQUE}" ] "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect_err("should have resulted in an error"); insta::assert_snapshot!(error.to_string()); @@ -461,6 +479,7 @@ cors: - "${env.TEST_CONFIG_NUMERIC_ENV_UNIQUE:-true}" "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect_err("should have resulted in an error"); insta::assert_snapshot!(error.to_string()); @@ -478,6 +497,7 @@ supergraph: another_one: foo "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect_err("should have resulted in an error"); insta::assert_snapshot!(error.to_string()); @@ -491,6 +511,7 @@ supergraph: introspection: ${env.TEST_CONFIG_UNKNOWN_WITH_NO_DEFAULT} "#, Expansion::default().unwrap(), + Mode::NoUpgrade, ) .expect_err("must have an error because the env variable is unknown"); insta::assert_snapshot!(error.to_string()); @@ -507,6 +528,7 @@ supergraph: .prefix("TEST_CONFIG") .supported_mode("env") .build(), + Mode::NoUpgrade, ) .expect_err("must have an error because the mode is unknown"); 
insta::assert_snapshot!(error.to_string()); @@ -524,6 +546,7 @@ supergraph: .prefix("TEST_CONFIG") .supported_mode("env") .build(), + Mode::NoUpgrade, ) .expect("must have expanded successfully"); } @@ -544,8 +567,49 @@ supergraph: path.to_string_lossy() ), Expansion::builder().supported_mode("file").build(), + Mode::NoUpgrade, ) .expect("must have expanded successfully"); assert!(config.supergraph.introspection); } + +#[derive(RustEmbed)] +#[folder = "src/configuration/testdata/migrations"] +struct Asset; + +#[test] +fn upgrade_old_configuration() { + for file_name in Asset::iter() { + if file_name.ends_with(".yaml") { + let source = Asset::get(&file_name).expect("test file must exist"); + let input = std::str::from_utf8(&source.data) + .expect("expected utf8") + .to_string(); + let new_config = crate::configuration::upgrade::upgrade_configuration( + &serde_yaml::from_str(&input).expect("config must be valid yaml"), + true, + ) + .expect("configuration could not be updated"); + let new_config = + serde_yaml::to_string(&new_config).expect("must be able to serialize config"); + + let result = validate_yaml_configuration( + &new_config, + Expansion::builder().build(), + Mode::NoUpgrade, + ); + + match result { + Ok(_) => { + insta::with_settings!({snapshot_suffix => file_name}, { + insta::assert_snapshot!(new_config) + }); + } + Err(e) => { + panic!("migrated configuration had validation errors:\n{}\n\noriginal configuration:\n{}\n\nmigrated configuration:\n{}", e, input, new_config) + } + } + } + } +} diff --git a/apollo-router/src/configuration/upgrade.rs b/apollo-router/src/configuration/upgrade.rs new file mode 100644 index 0000000000..9ed15c5ea8 --- /dev/null +++ b/apollo-router/src/configuration/upgrade.rs @@ -0,0 +1,325 @@ +use std::fmt::Write as _; + +use itertools::Itertools; +use proteus::Parser; +use proteus::TransformBuilder; +use rust_embed::RustEmbed; +use serde::Deserialize; +use serde_json::Value; + +use crate::error::ConfigurationError; + +#[derive(RustEmbed)] +#[folder = "src/configuration/migrations"] +struct Asset; + +#[derive(Deserialize, buildstructor::Builder)] +struct Migration { + description: String, + actions: Vec, +} + +#[derive(Deserialize)] +#[serde(tag = "type", rename_all = "snake_case")] +enum Action { + Delete { path: String }, + Copy { from: String, to: String }, + Move { from: String, to: String }, +} + +const REMOVAL_VALUE: &str = "__PLEASE_DELETE_ME"; +const REMOVAL_EXPRESSION: &str = r#"const("__PLEASE_DELETE_ME")"#; + +pub(crate) fn upgrade_configuration( + config: &serde_json::Value, + log_warnings: bool, +) -> Result { + // Transformers are loaded from a file and applied in order + let migrations: Vec = Asset::iter() + .sorted() + .filter(|filename| filename.ends_with(".yaml")) + .map(|filename| Asset::get(&filename).expect("migration must exist").data) + .map(|data| serde_yaml::from_slice(&data).expect("migration must be valid")) + .collect(); + + let mut config = config.clone(); + + let mut effective_migrations = Vec::new(); + for migration in &migrations { + let new_config = apply_migration(&config, migration)?; + + // If the config has been modified by the migration then let the user know + if new_config != config { + effective_migrations.push(migration); + } + + // Get ready for the next migration + config = new_config; + } + if !effective_migrations.is_empty() && log_warnings { + tracing::warn!("router configuration contains deprecated options: \n\n{}\n\nThese will become errors in the future. 
Run `router config upgrade ` to see a suggested upgraded configuration.", effective_migrations.iter().enumerate().map(|(idx, m)|format!(" {}. {}", idx + 1, m.description)).join("\n\n")); + } + Ok(config) +} + +fn apply_migration(config: &Value, migration: &Migration) -> Result { + let mut transformer_builder = TransformBuilder::default(); + //We always copy the entire doc to the destination first + transformer_builder = + transformer_builder.add_action(Parser::parse("", "").expect("migration must be valid")); + for action in &migration.actions { + match action { + Action::Delete { path } => { + // Deleting isn't actually supported by protus so we add a magic value to delete later + transformer_builder = transformer_builder.add_action( + Parser::parse(REMOVAL_EXPRESSION, path).expect("migration must be valid"), + ); + } + Action::Copy { from, to } => { + transformer_builder = transformer_builder + .add_action(Parser::parse(from, to).expect("migration must be valid")); + } + Action::Move { from, to } => { + transformer_builder = transformer_builder + .add_action(Parser::parse(from, to).expect("migration must be valid")); + // Deleting isn't actually supported by protus so we add a magic value to delete later + transformer_builder = transformer_builder.add_action( + Parser::parse(REMOVAL_EXPRESSION, from).expect("migration must be valid"), + ); + } + } + } + let transformer = transformer_builder + .build() + .expect("transformer for migration must be valid"); + let mut new_config = + transformer + .apply(config) + .map_err(|e| ConfigurationError::MigrationFailure { + error: e.to_string(), + })?; + + // Now we need to clean up elements that should be deleted. + cleanup(&mut new_config); + + Ok(new_config) +} + +pub(crate) fn generate_upgrade(config: &str, diff: bool) -> Result { + let parsed_config = + serde_yaml::from_str(config).map_err(|e| ConfigurationError::MigrationFailure { + error: e.to_string(), + })?; + let upgraded_config = upgrade_configuration(&parsed_config, true).map_err(|e| { + ConfigurationError::MigrationFailure { + error: e.to_string(), + } + })?; + let upgraded_config = serde_yaml::to_string(&upgraded_config).map_err(|e| { + ConfigurationError::MigrationFailure { + error: e.to_string(), + } + })?; + generate_upgrade_output(config, &upgraded_config, diff) +} + +pub(crate) fn generate_upgrade_output( + config: &str, + upgraded_config: &str, + diff: bool, +) -> Result { + // serde doesn't deal with whitespace and comments, these are lost in the upgrade process, so instead we try and preserve this in the diff. + // It's not ideal, and ideally the upgrade process should work on a DOM that is not serde, but for now we just make a best effort to preserve comments and whitespace. + // There absolutely are issues where comments will get stripped, but the output should be `correct`. 
+ let mut output = String::new(); + + let diff_result = diff::lines(config, upgraded_config); + + for diff_line in diff_result { + match diff_line { + diff::Result::Left(l) => { + let trimmed = l.trim(); + if !trimmed.starts_with('#') && !trimmed.is_empty() { + if diff { + writeln!(output, "-{}", l).expect("write will never fail"); + } + } else if diff { + writeln!(output, " {}", l).expect("write will never fail"); + } else { + writeln!(output, "{}", l).expect("write will never fail"); + } + } + diff::Result::Both(l, _) => { + if diff { + writeln!(output, " {}", l).expect("write will never fail"); + } else { + writeln!(output, "{}", l).expect("write will never fail"); + } + } + diff::Result::Right(r) => { + let trimmed = r.trim(); + if trimmed != "---" && !trimmed.is_empty() { + if diff { + writeln!(output, "+{}", r).expect("write will never fail"); + } else { + writeln!(output, "{}", r).expect("write will never fail"); + } + } + } + } + } + Ok(output) +} + +fn cleanup(value: &mut Value) { + match value { + Value::Null => {} + Value::Bool(_) => {} + Value::Number(_) => {} + Value::String(_) => {} + Value::Array(a) => { + a.retain(|v| &Value::String(REMOVAL_VALUE.to_string()) != v); + for value in a { + cleanup(value); + } + } + Value::Object(o) => { + o.retain(|_, v| &Value::String(REMOVAL_VALUE.to_string()) != v); + for value in o.values_mut() { + cleanup(value); + } + } + } +} + +#[cfg(test)] +mod test { + use serde_json::json; + use serde_json::Value; + + use crate::configuration::upgrade::apply_migration; + use crate::configuration::upgrade::generate_upgrade_output; + use crate::configuration::upgrade::Action; + use crate::configuration::upgrade::Migration; + + fn source_doc() -> Value { + json!( { + "obj": { + "field1": 1, + "field2": 2 + }, + "arr": [ + "v1", + "v2" + ] + }) + } + + #[test] + fn delete_field() { + insta::assert_json_snapshot!(apply_migration( + &source_doc(), + &Migration::builder() + .action(Action::Delete { + path: "obj.field1".to_string() + }) + .description("delete field1") + .build(), + ) + .expect("expected successful migration")); + } + + #[test] + fn delete_array_element() { + insta::assert_json_snapshot!(apply_migration( + &source_doc(), + &Migration::builder() + .action(Action::Delete { + path: "arr[0]".to_string() + }) + .description("delete arr[0]") + .build(), + ) + .expect("expected successful migration")); + } + + #[test] + fn move_field() { + insta::assert_json_snapshot!(apply_migration( + &source_doc(), + &Migration::builder() + .action(Action::Move { + from: "obj.field1".to_string(), + to: "new.obj.field1".to_string() + }) + .description("move field1") + .build(), + ) + .expect("expected successful migration")); + } + + #[test] + fn move_array_element() { + insta::assert_json_snapshot!(apply_migration( + &source_doc(), + &Migration::builder() + .action(Action::Move { + from: "arr[0]".to_string(), + to: "new.arr[0]".to_string() + }) + .description("move arr[0]") + .build(), + ) + .expect("expected successful migration")); + } + + #[test] + fn copy_field() { + insta::assert_json_snapshot!(apply_migration( + &source_doc(), + &Migration::builder() + .action(Action::Copy { + from: "obj.field1".to_string(), + to: "new.obj.field1".to_string() + }) + .description("copy field1") + .build(), + ) + .expect("expected successful migration")); + } + + #[test] + fn copy_array_element() { + insta::assert_json_snapshot!(apply_migration( + &source_doc(), + &Migration::builder() + .action(Action::Copy { + from: "arr[0]".to_string(), + to: "new.arr[0]".to_string() + }) 
+ .description("copy arr[0]") + .build(), + ) + .expect("expected successful migration")); + } + + #[test] + fn diff_upgrade_output() { + insta::assert_snapshot!(generate_upgrade_output( + "changed: bar\nstable: 1.0\ndeleted: gone", + "changed: bif\nstable: 1.0\nadded: new", + true + ) + .expect("expected successful migration")); + } + + #[test] + fn upgrade_output() { + insta::assert_snapshot!(generate_upgrade_output( + "changed: bar\nstable: 1.0\ndeleted: gone", + "changed: bif\nstable: 1.0\nadded: new", + false + ) + .expect("expected successful migration")); + } +} diff --git a/apollo-router/src/executable.rs b/apollo-router/src/executable.rs index cef3b3bf61..dc807b0091 100644 --- a/apollo-router/src/executable.rs +++ b/apollo-router/src/executable.rs @@ -12,8 +12,10 @@ use anyhow::Context; use anyhow::Result; use clap::AppSettings; use clap::ArgAction; +use clap::Args; use clap::CommandFactory; use clap::Parser; +use clap::Subcommand; use directories::ProjectDirs; use once_cell::sync::OnceCell; use tracing::dispatcher::with_default; @@ -24,6 +26,7 @@ use url::ParseError; use url::Url; use crate::configuration::generate_config_schema; +use crate::configuration::generate_upgrade; use crate::configuration::Configuration; use crate::configuration::ConfigurationError; use crate::router::ConfigurationSource; @@ -97,6 +100,37 @@ extern "C" fn drop_ad_hoc_profiler() { } } +/// Subcommands +#[derive(Subcommand, Debug)] +enum Commands { + /// Configuration subcommands. + Config(ConfigSubcommandArgs), +} + +#[derive(Args, Debug)] +struct ConfigSubcommandArgs { + /// Subcommands + #[clap(subcommand)] + command: ConfigSubcommand, +} + +#[derive(Subcommand, Debug)] +enum ConfigSubcommand { + /// Print the json configuration schema. + Schema, + + /// Print upgraded configuration. + Upgrade { + /// The location of the config to upgrade. + #[clap(parse(from_os_str), env = "APOLLO_ROUTER_CONFIG_PATH")] + config_path: PathBuf, + + /// Print a diff. + #[clap(parse(from_flag), long)] + diff: bool, + }, +} + /// Options for the router #[derive(Parser, Debug)] #[clap( @@ -151,9 +185,13 @@ pub(crate) struct Opt { supergraph_path: Option, /// Prints the configuration schema. - #[clap(long, action(ArgAction::SetTrue))] + #[clap(long, action(ArgAction::SetTrue), hide(true))] schema: bool, + /// Subcommands + #[clap(subcommand)] + command: Option, + /// Your Apollo key. #[clap(skip = std::env::var("APOLLO_KEY").ok())] apollo_key: Option, @@ -294,12 +332,6 @@ impl Executable { copy_args_to_env(); - if opt.schema { - let schema = generate_config_schema(); - println!("{}", serde_json::to_string_pretty(&schema)?); - return Ok(()); - } - let builder = tracing_subscriber::fmt::fmt().with_env_filter( EnvFilter::try_new(&opt.log_level).context("could not parse log configuration")?, ); @@ -315,15 +347,41 @@ impl Executable { }; GLOBAL_ENV_FILTER.set(opt.log_level.clone()).expect( - "failed setting the global env filter. THe start() function should only be called once", + "failed setting the global env filter. The start() function should only be called once", ); - // The dispatcher we created is passed explicitely here to make sure we display the logs - // in the initialization phase and in the state machine code, before a global subscriber - // is set using the configuration file - Self::inner_start(shutdown, schema, config, opt, dispatcher.clone()) - .with_subscriber(dispatcher) - .await + if opt.schema { + eprintln!("`router --schema` is deprecated. 
Use `router config schema`"); + let schema = generate_config_schema(); + println!("{}", serde_json::to_string_pretty(&schema)?); + return Ok(()); + } + + match opt.command.as_ref() { + Some(Commands::Config(ConfigSubcommandArgs { + command: ConfigSubcommand::Schema, + })) => { + let schema = generate_config_schema(); + println!("{}", serde_json::to_string_pretty(&schema)?); + Ok(()) + } + Some(Commands::Config(ConfigSubcommandArgs { + command: ConfigSubcommand::Upgrade { config_path, diff }, + })) => { + let config_string = std::fs::read_to_string(config_path)?; + let output = generate_upgrade(&config_string, *diff)?; + println!("{}", output); + Ok(()) + } + None => { + // The dispatcher we created is passed explicitly here to make sure we display the logs + // in the initialization phase and in the state machine code, before a global subscriber + // is set using the configuration file + Self::inner_start(shutdown, schema, config, opt, dispatcher.clone()) + .with_subscriber(dispatcher) + .await + } + } } async fn inner_start( diff --git a/apollo-router/src/plugins/telemetry/metrics/mod.rs b/apollo-router/src/plugins/telemetry/metrics/mod.rs index 01a9bccb59..97fa39ca13 100644 --- a/apollo-router/src/plugins/telemetry/metrics/mod.rs +++ b/apollo-router/src/plugins/telemetry/metrics/mod.rs @@ -43,7 +43,7 @@ pub(crate) type MetricsExporterHandle = Box; /// Configuration to add custom attributes/labels on metrics pub(crate) struct MetricsAttributesConf { /// Configuration to forward header values or body values from router request/response in metric attributes/labels - pub(crate) router: Option, + pub(crate) supergraph: Option, /// Configuration to forward header values or body values from subgraph request/response in metric attributes/labels pub(crate) subgraph: Option, } diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index aa0d921c9a..d9d7566520 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -761,7 +761,7 @@ impl Telemetry { if let Some(MetricsCommon { attributes: Some(MetricsAttributesConf { - router: Some(forward_attributes), + supergraph: Some(forward_attributes), .. }), .. 
@@ -840,7 +840,7 @@ impl Telemetry { .common .as_ref() .and_then(|c| c.attributes.as_ref()) - .and_then(|a| a.router.as_ref()) + .and_then(|a| a.supergraph.as_ref()) { attributes.extend( router_attributes_conf @@ -1041,7 +1041,7 @@ impl Telemetry { .as_ref() .and_then(|m| m.common.as_ref()) .and_then(|c| c.attributes.as_ref()) - .and_then(|c| c.router.as_ref()) + .and_then(|c| c.supergraph.as_ref()) { metric_attrs.extend( subgraph_attributes_conf @@ -1312,7 +1312,7 @@ mod tests { "metrics": { "common": { "attributes": { - "router": { + "supergraph": { "static": [ { "name": "myname", @@ -1513,7 +1513,7 @@ mod tests { "common": { "service_name": "apollo-router", "attributes": { - "router": { + "supergraph": { "static": [ { "name": "myname", diff --git a/docs/source/configuration/metrics.mdx b/docs/source/configuration/metrics.mdx index dc70a6d79f..3d6517633f 100644 --- a/docs/source/configuration/metrics.mdx +++ b/docs/source/configuration/metrics.mdx @@ -92,7 +92,7 @@ telemetry: metrics: common: attributes: - router: # Attribute configuration for requests to/responses from the router + supergraph: # Attribute configuration for requests to/responses from the router static: - name: "version" value: "v1.0.0" diff --git a/docs/source/configuration/overview.mdx b/docs/source/configuration/overview.mdx index b81dc9bc89..2a3d8043a2 100644 --- a/docs/source/configuration/overview.mdx +++ b/docs/source/configuration/overview.mdx @@ -182,7 +182,7 @@ The default value is `10s` (ten seconds), which is also the minimum allowed valu -##### `--schema` +##### `schema` @@ -208,6 +208,48 @@ Prints out the Apollo Router's version. +## Config subcommand + + + + + + + + + + + + + + + + + + + + + + +
Argument / Environment Variable | Description
+ +##### `schema` + + + +Prints out a JSON schema of the Router's configuration file, including [plugin configuration](#plugins). + +
+ +##### `upgrade` + + + +Prints out the configuration file, upgraded to the format of the current Router version. + +
+ + ## YAML config file The Apollo Router takes an optional YAML configuration file as input via the `--config` option. If the `--hot-reload` flag is also passed (or the `APOLLO_ROUTER_HOT_RELOAD` environment variable is set to `true`), the router automatically restarts when changes to the configuration file are made. @@ -389,7 +431,7 @@ The Apollo Router can generate a JSON schema for config validation in your text Generate the schema with the following command: ```bash -./router --schema > configuration_schema.json +./router config schema > configuration_schema.json ``` After you generate the schema, configure your text editor. Here are the instructions for some commonly used editors: @@ -399,3 +441,25 @@ After you generate the schema, configure your text editor. Here are the instruct - [IntelliJ](https://www.jetbrains.com/help/idea/json.html#ws_json_using_schemas) - [Sublime](https://github.com/sublimelsp/LSP-yaml) - [Vim](https://github.com/Quramy/vison) + +## Upgrading your Router configuration + +Occasionally breaking changes are made to the Apollo Router yaml format. Usually to extend functionality or improve usability. + +When running the Router with old configuration: + +1. if you have errors in your config then a warning will be emitted on startup. +2. if the Router config can be upgraded automatically with no validation errors then it will continue to load. +3. if the Router config had validation errors after automatic upgrade loading will stop. + +When you are notified that configuration can be upgraded use the `router config upgrade` command to see what your configuration should look like: + +```bash +./router config upgrade +``` + +To see what changes were made output the upgraded config diff using: + +```bash +./router config upgrade --diff +``` From 562f7ff233e62c6d33505d4c233307ee3a9dd8ff Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 23 Nov 2022 21:51:03 +0000 Subject: [PATCH 16/45] fix(deps): update dependency @apollo/server to v4.2.1 --- dockerfiles/fed2-demo-gateway/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dockerfiles/fed2-demo-gateway/package.json b/dockerfiles/fed2-demo-gateway/package.json index 4a80957a91..6ca965a8fe 100644 --- a/dockerfiles/fed2-demo-gateway/package.json +++ b/dockerfiles/fed2-demo-gateway/package.json @@ -7,7 +7,7 @@ "start": "node gateway.js" }, "dependencies": { - "@apollo/server": "4.1.1", + "@apollo/server": "4.2.1", "@apollo/gateway": "2.2.1", "supergraph-demo-opentelemetry": "0.2.4", "graphql": "16.6.0" From 6e18e3aff0bf0d2947eec1f6a998a610246edad3 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 24 Nov 2022 03:14:25 +0000 Subject: [PATCH 17/45] fix(deps): update rust crate miette to 5.5.0 --- Cargo.lock | 12 ++++++------ apollo-router/Cargo.toml | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 077f8eba72..7d183c0cd6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -190,7 +190,7 @@ dependencies = [ "maplit", "mediatype", "memchr", - "miette 5.4.1", + "miette 5.5.0", "mime", "mockall", "multimap", @@ -2710,13 +2710,13 @@ dependencies = [ [[package]] name = "miette" -version = "5.4.1" +version = "5.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a24c4b4063c21e037dffb4de388ee85e400bff299803aba9513d9c52de8116b" +checksum = "4afd9b301defa984bbdbe112b4763e093ed191750a0d914a78c1106b2d0fe703" dependencies = [ 
"atty", "backtrace", - "miette-derive 5.4.1", + "miette-derive 5.5.0", "once_cell", "owo-colors", "supports-color", @@ -2741,9 +2741,9 @@ dependencies = [ [[package]] name = "miette-derive" -version = "5.4.1" +version = "5.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "827d18edee5d43dc309eb0ac565f2b8e2fdc89b986b2d929e924a0f6e7f23835" +checksum = "97c2401ab7ac5282ca5c8b518a87635b1a93762b0b90b9990c509888eeccba29" dependencies = [ "proc-macro2", "quote", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 4434729778..7604086531 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -92,7 +92,7 @@ libc = "0.2.137" lru = "0.7.8" mediatype = "0.19.11" mockall = "0.11.3" -miette = { version = "5.4.1", features = ["fancy"] } +miette = { version = "5.5.0", features = ["fancy"] } mime = "0.3.16" multimap = "0.8.3" # To avoid tokio issues From c1569e83e6bc85790c7b82b6aa58774671953abf Mon Sep 17 00:00:00 2001 From: Jeremy Lempereur Date: Thu, 24 Nov 2022 12:32:47 +0100 Subject: [PATCH 18/45] Docs: Update cors match regex example: (#2152) fixes #2151 The docs match regex example was wrong, it was missing ^ at the beginning and $ at the end. `$` marks the end of a string, which could be a safety issue. --- NEXT_CHANGELOG.md | 7 +++++++ docs/source/configuration/cors.mdx | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 8885cd66e2..5655e1f346 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -145,6 +145,13 @@ By [@col](https://github.com/col) in https://github.com/apollographql/router/pul ## 🛠 Maintenance ## 📚 Documentation +### Docs: Update cors match regex example ([Issue #2151](https://github.com/apollographql/router/issues/2151)) + +The docs CORS regex example now displays a working and safe way to allow `HTTPS` subdomains of `api.example.com`. + +By [@col](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/2152 + + ### update documentation to reflect new examples structure ([Issue #2095](https://github.com/apollographql/router/pull/2133)) We recently updated the examples directory structure. This fixes the documentation links to the examples. It also makes clear that rhai subgraph fields are read-only, since they are shared resources. diff --git a/docs/source/configuration/cors.mdx b/docs/source/configuration/cors.mdx index 9b1637085e..ce399cf523 100644 --- a/docs/source/configuration/cors.mdx +++ b/docs/source/configuration/cors.mdx @@ -40,7 +40,7 @@ cors: - https://www.your-app.example.com - https://studio.apollographql.com # Keep this so Apollo Studio can run queries against your router match_origins: - - "https://([a-z0-9]+[.])*api[.]example[.]com" # any host that uses https and ends with .api.example.com + - "^https://([a-z0-9]+[.])*api[.]example[.]com$" # any host that uses https and ends with .api.example.com ``` You can also disable CORS entirely by setting `origins` to an empty list: From a4f0cdf243bc79ad4676efe2f4e25f44117e7c46 Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Thu, 24 Nov 2022 14:21:04 +0000 Subject: [PATCH 19/45] improve router docker debug image (#2142) fixes: #2135 fixes: #2145 This PR repurposes the -debug image to make it the basis for a memory tracking image which we can use for investigating router memory issues via heaptrack. (https://github.com/KDE/heaptrack) The PR is a *breaking* change because it now automatically starts the router under the control of heaptrack. 
Technically, it's not really a breaking change and it's certainly not an API change, but I really want to draw people's attention to the fact that the debug image will now execute a lot slower than the non-debug image and use a lot more memory (to track memory with...). I've updated the docker documentation to show how to mount a local directory to store the heaptrack data. I haven't updated the kubernetes docs, because we don't go into that level of detail and we assume that a kubernetes devops person would know how to allocate and mount a PVC. --- NEXT_CHANGELOG.md | 11 +++++ dockerfiles/Dockerfile.router | 33 +++++++++++-- dockerfiles/diy/build_docker_image.sh | 17 ++++++- dockerfiles/diy/dockerfiles/Dockerfile.repo | 46 +++++++++++++++--- docs/source/containerization/docker.mdx | 53 ++++++++++++++------- docs/source/containerization/kubernetes.mdx | 2 +- docs/source/containerization/overview.mdx | 6 +-- 7 files changed, 135 insertions(+), 33 deletions(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 5655e1f346..4509648200 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -25,6 +25,17 @@ By [@USERNAME](https://github.com/USERNAME) in https://github.com/apollographql/ # [x.x.x] (unreleased) - 2022-mm-dd ## ❗ BREAKING ❗ + +### Router debug Docker images now run under the control of heaptrack ([Issue #2135](https://github.com/apollographql/router/pull/2142)) + +From the next release, our debug Docker image will invoke the router under the control of heaptrack. We are making this change to make it simple for users to investigate potential memory issues with the router. + +Do not run debug images in performance sensitive contexts. The tracking of memory allocations will significantly impact performance. In general, the debug image should only be used in consultation with Apollo engineering and support. + +Look at our documentation for examples of how to use the image in either Docker or Kubernetes. 
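For illustration, an invocation of the debug image might look like the sketch below. The image tag and host paths are placeholders; the only details taken from this change are the in-container paths — the wrapper script writes heaptrack data under `/dist/data`, and configuration is read from `/dist/config/router.yaml`.

```bash
# Sketch only: the image tag and host paths are placeholders — adjust for your release.
# Mount a host directory over /dist/data so the heaptrack profile written by the
# debug image's wrapper script (heaptrack -o /dist/data/router_heaptrack ...) is
# preserved after the container exits.
docker run --rm \
  --mount "type=bind,source=$(pwd)/router.yaml,target=/dist/config/router.yaml" \
  --mount "type=bind,source=$(pwd)/heaptrack_data,target=/dist/data" \
  ghcr.io/apollographql/router:<version>-debug
```

The resulting `router_heaptrack.*` file on the host can then be analysed with heaptrack's standard tools such as `heaptrack_gui` or `heaptrack_print`.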
+ +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2142 + ### Fix naming inconsistency of telemetry.metrics.common.attributes.router ([Issue #2076](https://github.com/apollographql/router/issues/2076)) Mirroring the rest of the config `router` should be `supergraph` diff --git a/dockerfiles/Dockerfile.router b/dockerfiles/Dockerfile.router index 40d475ef3d..f8b634cde0 100644 --- a/dockerfiles/Dockerfile.router +++ b/dockerfiles/Dockerfile.router @@ -1,6 +1,7 @@ FROM debian:bullseye-slim ARG ROUTER_RELEASE=latest +ARG DEBUG_IMAGE=false WORKDIR /dist @@ -8,8 +9,17 @@ WORKDIR /dist RUN \ apt-get update -y \ && apt-get install -y \ - curl \ - && rm -rf /var/lib/apt/lists/* + curl + +# If debug image, install heaptrack and make a data directory +RUN \ + if [ "${DEBUG_IMAGE}" = "true" ]; then \ + apt-get install -y heaptrack && \ + mkdir data; \ + fi + +# Clean up apt lists +RUN rm -rf /var/lib/apt/lists/* # Run the Router downloader which puts Router into current working directory RUN curl -sSL https://router.apollo.dev/download/nix/${ROUTER_RELEASE}/ | sh @@ -25,5 +35,20 @@ LABEL org.opencontainers.image.source="https://github.com/apollographql/router" ENV APOLLO_ROUTER_CONFIG_PATH="/dist/config/router.yaml" -# Default executable is the router -ENTRYPOINT ["/dist/router"] +# Create a wrapper script to run the router, use exec to ensure signals are handled correctly +RUN \ + echo '#!/usr/bin/env bash \ +\nset -e \ +\n \ +\nif [ -f "/usr/bin/heaptrack" ]; then \ +\n exec heaptrack -o /dist/data/router_heaptrack /dist/router "$@" \ +\nelse \ +\n exec /dist/router "$@" \ +\nfi \ +' > /dist/router_wrapper.sh + +# Make sure we can run our wrapper +RUN chmod 755 /dist/router_wrapper.sh + +# Default executable is the wrapper script +ENTRYPOINT ["/dist/router_wrapper.sh"] diff --git a/dockerfiles/diy/build_docker_image.sh b/dockerfiles/diy/build_docker_image.sh index 208f9b756b..41458ae52c 100755 --- a/dockerfiles/diy/build_docker_image.sh +++ b/dockerfiles/diy/build_docker_image.sh @@ -15,14 +15,18 @@ # Note: This utility makes assumptions about the existence of files relative # to the directory where it is executed. To work correctly you must # execute in the "repo"/dockerfiles/diy directory. +# Note: A debug image is an image where heaptrack is installed. The router +# is still a release build router, but all memory is being tracked +# under heaptrack. (https://github.com/KDE/heaptrack) ### ### # Terminate with a nice usage message ### usage () { - printf "Usage: build_docker_image.sh [-b [-r ]] []\n" + printf "Usage: build_docker_image.sh [-b [-r ]] [-d] []\n" printf "\t-b build docker image from the default repo, if not present build from a released version\n" + printf "\t-d build debug image, router will run under control of heaptrack\n" printf "\t-r build docker image from a specified repo, only valid with -b flag\n" printf "\t a valid release. 
If [-b] is specified, this is optional\n" printf "\tExample 1: Building HEAD from the repo\n" @@ -35,6 +39,8 @@ usage () { printf "\t\tbuild_docker_image.sh -b 7f7d223f42af34fad35b898d976bc07d0f5440c5\n" printf "\tExample 5: Building tag v0.9.1 from the released version\n" printf "\t\tbuild_docker_image.sh v0.9.1\n" + printf "\tExample 6: Building a debug image with tag v0.9.1 from the released version\n" + printf "\t\tbuild_docker_image.sh -d v0.9.1\n" exit 2 } @@ -56,13 +62,14 @@ terminate () { # If no ROUTER_VERSION specified, we are building HEAD from a repo ROUTER_VERSION= BUILD_IMAGE=false +DEBUG_IMAGE=false DEFAULT_REPO="https://github.com/apollographql/router.git" GIT_REPO= ### # Process Command Line ### -if ! args=$(getopt bhr: "$@"); then +if ! args=$(getopt bdhr: "$@"); then usage fi @@ -79,6 +86,10 @@ while :; do BUILD_IMAGE=true shift ;; + -d) + DEBUG_IMAGE=true + shift + ;; -r) GIT_REPO="${2}" shift; shift @@ -143,6 +154,7 @@ if [ "${BUILD_IMAGE}" = true ]; then git checkout "${ROUTER_VERSION}" > /dev/null 2>&1 || terminate "Couldn't checkout ${ROUTER_VERSION}" # Build our docker images docker build -q -t "router:${ROUTER_VERSION}" \ + --build-arg DEBUG_IMAGE="${DEBUG_IMAGE}" \ --build-arg ROUTER_VERSION="${ROUTER_VERSION}" \ --no-cache -f ../Dockerfile.repo . \ || terminate "Couldn't build router image" @@ -150,6 +162,7 @@ else # Let the user know what we are going to do echo "Building image: ${ROUTER_VERSION}" from released version"" docker build -q -t "router:${ROUTER_VERSION}" \ + --build-arg DEBUG_IMAGE="${DEBUG_IMAGE}" \ --build-arg ROUTER_RELEASE="${ROUTER_VERSION}" \ --no-cache -f Dockerfile.router . \ || terminate "Couldn't build router image" diff --git a/dockerfiles/diy/dockerfiles/Dockerfile.repo b/dockerfiles/diy/dockerfiles/Dockerfile.repo index 3e0a5b9664..4203c3ca12 100644 --- a/dockerfiles/diy/dockerfiles/Dockerfile.repo +++ b/dockerfiles/diy/dockerfiles/Dockerfile.repo @@ -28,18 +28,52 @@ RUN mkdir -p /dist/config && \ # Copy configuration for docker image COPY dockerfiles/router.yaml /dist/config +# Build our final image FROM debian:bullseye-slim -# Set labels for our image -LABEL org.opencontainers.image.authors="Apollo Graph, Inc. https://github.com/apollographql/router" -LABEL org.opencontainers.image.source="https://github.com/apollographql/router" +ARG DEBUG_IMAGE=false + +WORKDIR /dist + +# Install curl +RUN \ + apt-get update -y \ + && apt-get install -y \ + curl # Copy in the required files from our build image COPY --from=build --chown=root:root /dist /dist -WORKDIR /dist +# If debug image, install heaptrack and make a data directory +RUN \ + if [ "${DEBUG_IMAGE}" = "true" ]; then \ + apt-get install -y heaptrack && \ + mkdir data; \ + fi + +# Clean up apt lists +RUN rm -rf /var/lib/apt/lists/* + +# Set labels for our image +LABEL org.opencontainers.image.authors="Apollo Graph, Inc. 
https://github.com/apollographql/router" +LABEL org.opencontainers.image.source="https://github.com/apollographql/router" ENV APOLLO_ROUTER_CONFIG_PATH="/dist/config/router.yaml" -# Default executable is the router -ENTRYPOINT ["/dist/router"] +# Create a wrapper script to run the router, use exec to ensure signals are handled correctly +RUN \ + echo '#!/usr/bin/env bash \ +\nset -e \ +\n \ +\nif [ -f "/usr/bin/heaptrack" ]; then \ +\n exec heaptrack -o /dist/data/router_heaptrack /dist/router "$@" \ +\nelse \ +\n exec /dist/router "$@" \ +\nfi \ +' > /dist/router_wrapper.sh + +# Make sure we can run our wrapper +RUN chmod 755 /dist/router_wrapper.sh + +# Default executable is the wrapper script +ENTRYPOINT ["/dist/router_wrapper.sh"] diff --git a/docs/source/containerization/docker.mdx b/docs/source/containerization/docker.mdx index f56764921e..530246f380 100644 --- a/docs/source/containerization/docker.mdx +++ b/docs/source/containerization/docker.mdx @@ -48,7 +48,7 @@ docker run -p 4000:4000 \ ## Debugging your container -If you run the debug image, then it's easy to debug your container: +It's easy to debug your container by changing the `entrypoint` ```bash docker run -p 4000:4000 \ @@ -58,18 +58,31 @@ docker run -p 4000:4000 \ --rm \ --interactive \ --tty \ - --entrypoint=sh - ghcr.io/apollographql/router:-debug -/dist # pwd + --entrypoint=bash \ + ghcr.io/apollographql/router: +dist# pwd /dist -/dist # ls -LICENSE config router -README.md licenses.html schema -/dist # exit +dist# ls +config router schema +dist# exit +exit ``` In this case, we've added interactive and tty flags and changed the entrypoint of the image to be a shell. +## Running the debug container to investigate memory issues + +```bash +docker run -p 4000:4000 \ + --env APOLLO_GRAPH_REF="" \ + --env APOLLO_KEY="" \ + --mount "type=bind,source=/data,target=/dist/data" + --rm \ + ghcr.io/apollographql/router:-debug +``` + +The router will run under the control of heaptrack. The heaptrack output will be saved to the /data directory. The output can be analyzed directly using `heaptrack_gui` or `heaptrack_print` or shared with Apollo support staff. + ## Specifying the Supergraph If we don't want to use uplink to retrieve our subgraph, we can manually specify the details. @@ -91,16 +104,22 @@ In the `dockerfiles/diy` directory, we now provide a script, `build_docker_image ```bash % ./build_docker_image.sh -h -Usage: build_docker_image.sh [-b] [] - -b build docker image from a repo, if not present build from a released tarball +Usage: build_docker_image.sh [-b [-r ]] [-d] [] + -b build docker image from the default repo, if not present build from a released version + -d build debug image, router will run under control of heaptrack + -r build docker image from a specified repo, only valid with -b flag a valid release. 
If [-b] is specified, this is optional Example 1: Building HEAD from the repo build_docker_image.sh -b - Example 2: Building tag from the repo - build_docker_image.sh -b v1.4.0 - Example 3: Building commit hash from the repo - build_docker_image.sh -b 1c220d35acf9ad2537b8edc58c498390b6701d3d - Example 4: Building tag v1.4.0 from the released tarball - build_docker_image.sh v1.4.0 + Example 2: Building HEAD from a different repo + build_docker_image.sh -b -r /Users/anon/dev/router + Example 3: Building tag from the repo + build_docker_image.sh -b v0.9.1 + Example 4: Building commit hash from the repo + build_docker_image.sh -b 7f7d223f42af34fad35b898d976bc07d0f5440c5 + Example 5: Building tag v0.9.1 from the released version + build_docker_image.sh v0.9.1 + Example 6: Building a debug image with tag v0.9.1 from the released version + build_docker_image.sh -d v0.9.1 ``` -Note: The script has to be run from the `dockerfiles/diy` directory because it makes assumptions about the relative availability of various files. The example uses [distroless images](https://github.com/GoogleContainerTools/distroless) for the final image build. Feel free to modify the script to use images which better suit your own needs. +Note: The script has to be run from the `dockerfiles/diy` directory because it makes assumptions about the relative availability of various files. The example uses [debian:bullseye-slim image](https://hub.docker.com/_/debian/) for the final image build. Feel free to modify the script to use images which better suit your own needs, but be careful if using the [-d] flag since it makes the assumption that there is a `heaptrack` package available to install. diff --git a/docs/source/containerization/kubernetes.mdx b/docs/source/containerization/kubernetes.mdx index 080ec037cf..3be252c9e0 100644 --- a/docs/source/containerization/kubernetes.mdx +++ b/docs/source/containerization/kubernetes.mdx @@ -29,7 +29,7 @@ router docker images. You can use helm to install charts from an OCI registry as follows: ```bash -helm install --set router.configuration.telemetry.metrics.prometheus.enabled=true --set managedFederation.apiKey="REDACTED" --set managedFederation.graphRef="REDACTED" --create-namespace --namespace router-deploy router-test oci://ghcr.io/apollographql/helm-charts/router --version 1.0.0-rc.4 --values router/values.yaml +helm install --set router.configuration.telemetry.metrics.prometheus.enabled=true --set managedFederation.apiKey="REDACTED" --set managedFederation.graphRef="REDACTED" --create-namespace --namespace router-deploy router-test oci://ghcr.io/apollographql/helm-charts/router --version 1.0.0-rc.8 --values router/values.yaml ``` For more details about using helm with OCI based registries, see [here](https://helm.sh/docs/topics/registries/) diff --git a/docs/source/containerization/overview.mdx b/docs/source/containerization/overview.mdx index 236a64a229..0b13cdbbb2 100644 --- a/docs/source/containerization/overview.mdx +++ b/docs/source/containerization/overview.mdx @@ -6,11 +6,11 @@ import ElasticNotice from '../../shared/elastic-notice.mdx'; > More details about Apollo Router images are available [on GitHub](https://github.com/apollographql/router/pkgs/container/router). -Each release of the Apollo Router includes both default and debug images. Each image for a release contains the same build. Debug images additionally contain a "busybox" to help with container debugging. +Each release of the Apollo Router includes both default and debug images. 
Each image for a release contains the same build. Debug images have helpful debugging utilities installed and run the router under the control of heaptrack (https://github.com/KDE/heaptrack). -Here's a basic example of running a router image with Docker (make sure to replace `` with whichever version you want to use, such as `v0.1.0-preview.1`): +Here's a basic example of running a router image with Docker (make sure to replace `` with whichever version you want to use, such as `v1.4.0`): ```bash title="Docker" docker run -p 4000:4000 \ @@ -35,4 +35,4 @@ For examples of using router images in specific environments, see the guides for ## Image build -Apollo Router images are based on [distroless](https://github.com/GoogleContainerTools/distroless), which is designed to provide constrained, secure, and small images. +Apollo Router images are based on [debian:bullseye-slim](https://hub.docker.com/_/debian/), which is designed to provide constrained, secure, and small images. From 474ac76c2677e230c28fc6f15cd53c6d11a74267 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Thu, 24 Nov 2022 16:13:12 +0100 Subject: [PATCH 20/45] Request retries for subgraph queries (#2006) implements retries for subgraph requests. This uses Finagle's retry buckets algorithm --- NEXT_CHANGELOG.md | 24 +++++ ...nfiguration__tests__schema_generation.snap | 50 ++++++++++ .../src/plugins/traffic_shaping/mod.rs | 92 +++++++++++++------ .../src/plugins/traffic_shaping/retry.rs | 54 +++++++++++ apollo-router/src/services/subgraph.rs | 29 ++++++ docs/source/configuration/traffic-shaping.mdx | 6 ++ 6 files changed, 229 insertions(+), 26 deletions(-) create mode 100644 apollo-router/src/plugins/traffic_shaping/retry.rs diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 4509648200..ffafa08bfe 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -144,6 +144,30 @@ There are situations where comments and whitespace are not preserved. 
This may b By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2116 +### *Experimental* subgraph request retry ([Issue #338](https://github.com/apollographql/router/issues/338), [Issue #1956](https://github.com/apollographql/router/issues/1956)) + +Implements subgraph request retries, using Finagle's retry buckets algorithm: +- it defines a minimal number of retries per second (`min_per_sec`, default is 10 retries per second), to +bootstrap the system or for low traffic deployments +- for each successful request, we add a "token" to the bucket, those tokens expire after `ttl` (default: 10 seconds) +- the number of available additional retries is a part of the number of tokens, defined by `retry_percent` (default is 0.2) + +This is activated in the `traffic_shaping` plugin, either globally or per subgraph: + +```yaml +traffic_shaping: + all: + experimental_retry: + min_per_sec: 10 + ttl: 10s + retry_percent: 0.2 + subgraphs: + accounts: + experimental_retry: + min_per_sec: 20 +``` + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2006 ## 🐛 Fixes diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 971e0c6891..16795261dd 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -2080,6 +2080,31 @@ expression: "&schema" "type": "boolean", "nullable": true }, + "experimental_retry": { + "type": "object", + "properties": { + "min_per_sec": { + "description": "minimum rate of retries allowed to accomodate clients that have just started issuing requests, or clients that do not issue many requests per window. The default value is 10", + "type": "integer", + "format": "uint32", + "minimum": 0.0, + "nullable": true + }, + "retry_percent": { + "description": "percentage of calls to deposit that can be retried. This is in addition to any retries allowed for via min_per_sec. Must be between 0 and 1000, default value is 0.2", + "type": "number", + "format": "float", + "nullable": true + }, + "ttl": { + "description": "how long a single deposit should be considered. Must be between 1 and 60 seconds, default value is 10 seconds", + "default": null, + "type": "string" + } + }, + "additionalProperties": false, + "nullable": true + }, "global_rate_limit": { "description": "Enable global rate limiting", "type": "object", @@ -2189,6 +2214,31 @@ expression: "&schema" "type": "boolean", "nullable": true }, + "experimental_retry": { + "type": "object", + "properties": { + "min_per_sec": { + "description": "minimum rate of retries allowed to accomodate clients that have just started issuing requests, or clients that do not issue many requests per window. The default value is 10", + "type": "integer", + "format": "uint32", + "minimum": 0.0, + "nullable": true + }, + "retry_percent": { + "description": "percentage of calls to deposit that can be retried. This is in addition to any retries allowed for via min_per_sec. Must be between 0 and 1000, default value is 0.2", + "type": "number", + "format": "float", + "nullable": true + }, + "ttl": { + "description": "how long a single deposit should be considered. 
Must be between 1 and 60 seconds, default value is 10 seconds", + "default": null, + "type": "string" + } + }, + "additionalProperties": false, + "nullable": true + }, "global_rate_limit": { "description": "Enable global rate limiting", "type": "object", diff --git a/apollo-router/src/plugins/traffic_shaping/mod.rs b/apollo-router/src/plugins/traffic_shaping/mod.rs index 4b06322ee7..be952aaaf1 100644 --- a/apollo-router/src/plugins/traffic_shaping/mod.rs +++ b/apollo-router/src/plugins/traffic_shaping/mod.rs @@ -9,19 +9,21 @@ mod deduplication; mod rate; +mod retry; mod timeout; use std::collections::HashMap; use std::num::NonZeroU64; -use std::pin::Pin; use std::sync::Mutex; use std::time::Duration; +use futures::future::BoxFuture; use http::header::ACCEPT_ENCODING; use http::header::CONTENT_ENCODING; use http::HeaderValue; use schemars::JsonSchema; use serde::Deserialize; +use tower::retry::Retry; use tower::util::Either; use tower::util::Oneshot; use tower::BoxError; @@ -32,6 +34,7 @@ use tower::ServiceExt; use self::deduplication::QueryDeduplicationLayer; use self::rate::RateLimitLayer; pub(crate) use self::rate::RateLimited; +use self::retry::RetryPolicy; pub(crate) use self::timeout::Elapsed; use self::timeout::TimeoutLayer; use crate::error::ConfigurationError; @@ -64,6 +67,8 @@ struct Shaping { #[schemars(with = "String", default)] /// Enable timeout for incoming requests timeout: Option, + // *experimental feature*: Enables request retry + experimental_retry: Option, } impl Merge for Shaping { @@ -79,6 +84,42 @@ impl Merge for Shaping { .as_ref() .or(fallback.global_rate_limit.as_ref()) .cloned(), + experimental_retry: self + .experimental_retry + .as_ref() + .or(fallback.experimental_retry.as_ref()) + .cloned(), + }, + } + } +} + +#[derive(PartialEq, Debug, Clone, Deserialize, JsonSchema)] +#[serde(deny_unknown_fields)] +struct RetryConfig { + #[serde(deserialize_with = "humantime_serde::deserialize", default)] + #[schemars(with = "String", default)] + /// how long a single deposit should be considered. Must be between 1 and 60 seconds, + /// default value is 10 seconds + ttl: Option, + /// minimum rate of retries allowed to accomodate clients that have just started + /// issuing requests, or clients that do not issue many requests per window. The + /// default value is 10 + min_per_sec: Option, + /// percentage of calls to deposit that can be retried. This is in addition to any + /// retries allowed for via min_per_sec. 
Must be between 0 and 1000, default value + /// is 0.2 + retry_percent: Option, +} + +impl Merge for RetryConfig { + fn merge(&self, fallback: Option<&Self>) -> Self { + match fallback { + None => self.clone(), + Some(fallback) => RetryConfig { + ttl: self.ttl.or(fallback.ttl), + min_per_sec: self.min_per_sec.or(fallback.min_per_sec), + retry_percent: self.retry_percent.or(fallback.retry_percent), }, } } @@ -234,24 +275,15 @@ impl TrafficShaping { Error = BoxError, Future = tower::util::Either< tower::util::Either< - Pin< - Box< - (dyn futures::Future< - Output = std::result::Result< - subgraph::Response, - Box< - (dyn std::error::Error - + std::marker::Send - + std::marker::Sync - + 'static), - >, - >, - > + std::marker::Send - + 'static), - >, - >, + BoxFuture<'static, Result>, timeout::future::ResponseFuture< - Oneshot, S>, subgraph::Request>, + Oneshot< + tower::util::Either< + Retry, S>>, + tower::util::Either, S>, + >, + subgraph::Request, + >, >, >, >::Future, @@ -284,16 +316,24 @@ impl TrafficShaping { }) .clone() }); + + let retry = config.experimental_retry.as_ref().map(|config| { + let retry_policy = + RetryPolicy::new(config.ttl, config.min_per_sec, config.retry_percent); + tower::retry::RetryLayer::new(retry_policy) + }); + Either::A(ServiceBuilder::new() - .option_layer(config.deduplicate_query.unwrap_or_default().then( - QueryDeduplicationLayer::default - )) - .layer(TimeoutLayer::new( - config - .timeout - .unwrap_or(DEFAULT_TIMEOUT), + .option_layer(config.deduplicate_query.unwrap_or_default().then( + QueryDeduplicationLayer::default )) - .option_layer(rate_limit) + .layer(TimeoutLayer::new( + config + .timeout + .unwrap_or(DEFAULT_TIMEOUT), + )) + .option_layer(retry) + .option_layer(rate_limit) .service(service) .map_request(move |mut req: SubgraphRequest| { if let Some(compression) = config.compression { diff --git a/apollo-router/src/plugins/traffic_shaping/retry.rs b/apollo-router/src/plugins/traffic_shaping/retry.rs new file mode 100644 index 0000000000..6138581c21 --- /dev/null +++ b/apollo-router/src/plugins/traffic_shaping/retry.rs @@ -0,0 +1,54 @@ +use std::future; +use std::sync::Arc; +use std::time::Duration; + +use tower::retry::budget::Budget; +use tower::retry::Policy; + +#[derive(Clone, Default)] +pub(crate) struct RetryPolicy { + budget: Arc, +} + +impl RetryPolicy { + pub(crate) fn new( + duration: Option, + min_per_sec: Option, + retry_percent: Option, + ) -> Self { + Self { + budget: Arc::new(Budget::new( + duration.unwrap_or_else(|| Duration::from_secs(10)), + min_per_sec.unwrap_or(10), + retry_percent.unwrap_or(0.2), + )), + } + } +} + +impl Policy for RetryPolicy { + type Future = future::Ready; + + fn retry(&self, _req: &Req, result: Result<&Res, &E>) -> Option { + match result { + Ok(_) => { + // Treat all `Response`s as success, + // so deposit budget and don't retry... 
+ self.budget.deposit(); + None + } + Err(_e) => { + let withdrew = self.budget.withdraw(); + if withdrew.is_err() { + return None; + } + + Some(future::ready(self.clone())) + } + } + } + + fn clone_request(&self, req: &Req) -> Option { + Some(req.clone()) + } +} diff --git a/apollo-router/src/services/subgraph.rs b/apollo-router/src/services/subgraph.rs index e7f247f502..1b9ea53a7c 100644 --- a/apollo-router/src/services/subgraph.rs +++ b/apollo-router/src/services/subgraph.rs @@ -74,6 +74,35 @@ impl Request { } } +impl Clone for Request { + fn clone(&self) -> Self { + // http::Request is not clonable so we have to rebuild a new one + // we don't use the extensions field for now + let mut builder = http::Request::builder() + .method(self.subgraph_request.method()) + .version(self.subgraph_request.version()) + .uri(self.subgraph_request.uri()); + + { + let headers = builder.headers_mut().unwrap(); + headers.extend( + self.subgraph_request + .headers() + .iter() + .map(|(name, value)| (name.clone(), value.clone())), + ); + } + let subgraph_request = builder.body(self.subgraph_request.body().clone()).unwrap(); + + Self { + supergraph_request: self.supergraph_request.clone(), + subgraph_request, + operation_kind: self.operation_kind, + context: self.context.clone(), + } + } +} + assert_impl_all!(Response: Send); #[derive(Debug)] #[non_exhaustive] diff --git a/docs/source/configuration/traffic-shaping.mdx b/docs/source/configuration/traffic-shaping.mdx index c01b7b6168..c61f48e698 100644 --- a/docs/source/configuration/traffic-shaping.mdx +++ b/docs/source/configuration/traffic-shaping.mdx @@ -12,6 +12,7 @@ The Apollo Router supports the following types of traffic shaping between itself - The router currently supports `gzip`, `br`, and `deflate`. - **Global rate limiting** - If you want to rate limit requests to subgraphs or to the router itself. - **Timeout**: - Set a timeout to subgraphs and router requests. +- **Request Retry**: - retry subgraph requests if they fail due to network errors. This implements Finagle's retry buckets mechanism. **Experimental feature**: retry configuration might change in the future. Each of these optimizations can reduce network bandwidth and CPU usage for your subgraphs. @@ -38,6 +39,11 @@ traffic_shaping: capacity: 10 interval: 5s # Must not be greater than 18_446_744_073_709_551_615 milliseconds and not less than 0 milliseconds timeout: 50s # If a request to the subgraph 'products' takes more than 50secs then cancel the request (30 sec by default) + experimental_retry: + min_per_sec: 10 # minimal number of retries per second (`min_per_sec`, default is 10 retries per second) + ttl: 10s # for each successful request, we register a token, that expires according to this option (default: 10s) + retry_percent: 0.2 # defines the proportion of available retries to the current number of tokens + ``` Any configuration under the `subgraphs` key takes precedence over configuration under the `all` key. In the example above, query deduplication is enabled for all subgraphs _except_ the `products` subgraph. From f6e0ef8563161b79e4430a860dc18cba1e407798 Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Thu, 24 Nov 2022 16:15:32 +0100 Subject: [PATCH 21/45] Add configuration and features to logging (#2040) close #1998 ## Basic configuration By default some logs containing sensible data (like request body, response body, headers) are not displayed even if we set the right log level. 
For example if you need to display raw responses from one of your subgraph it won't be displayed by default. To enable them you have to configure it thanks to the `when_header` setting in the new section `experimental_logging`. It let's you set different headers to enable more logs (request/response headers/body for supergraph and subgraphs) when the request contains these headers with corresponding values/regex. Here is an example how you can configure it: ```yaml title="router.yaml" telemetry: experimental_logging: format: json # By default it's "pretty" if you are in an interactive shell session display_filename: true # Display filename where the log is coming from. Default: true display_line_number: false # Display line number in the file where the log is coming from. Default: true # If one of these headers matches we will log supergraph and subgraphs requests/responses when_header: - name: apollo-router-log-request value: my_client headers: true # default: false body: true # default: false # log request for all requests coming from Iphones - name: user-agent match: ^Mozilla/5.0 (iPhone* headers: true ``` /!\ This PR also upgrade `tracing` (not to the latest version) because I needed a fix. Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- Cargo.lock | 16 +- NEXT_CHANGELOG.md | 24 ++ .../templates/base/Cargo.toml | 2 +- apollo-router/Cargo.toml | 16 +- ...nfiguration__tests__schema_generation.snap | 100 +++++++ apollo-router/src/context.rs | 8 + apollo-router/src/plugins/telemetry/config.rs | 271 +++++++++++++++++- .../src/plugins/telemetry/metrics/apollo.rs | 1 + apollo-router/src/plugins/telemetry/mod.rs | 114 ++++++-- .../telemetry/tracing/apollo_telemetry.rs | 2 +- .../src/services/subgraph_service.rs | 26 +- .../src/testdata/datadog.router.yaml | 2 +- apollo-router/src/testdata/jaeger.router.yaml | 16 +- apollo-router/src/testdata/otlp.router.yaml | 2 +- apollo-router/src/testdata/zipkin.router.yaml | 2 +- apollo-router/tests/common.rs | 4 +- apollo-router/tests/integration_tests.rs | 6 + apollo-router/tests/jaeger_test.rs | 4 +- apollo-router/tests/logging_test.rs | 197 +++++++++++++ docs/source/configuration/logging.mdx | 35 ++- licenses.html | 62 ++-- 21 files changed, 806 insertions(+), 104 deletions(-) create mode 100644 apollo-router/tests/logging_test.rs diff --git a/Cargo.lock b/Cargo.lock index 7d183c0cd6..fea151b4d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5360,9 +5360,9 @@ dependencies = [ [[package]] name = "tracing" -version = "0.1.34" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09" +checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" dependencies = [ "cfg-if", "log", @@ -5384,11 +5384,11 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.26" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f" +checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" dependencies = [ - "lazy_static", + "once_cell", "valuable", ] @@ -5441,13 +5441,13 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.11" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596" +checksum = "60db860322da191b40952ad9affe65ea23e7dd6a5c442c2c42865810c6ab8e6b" 
dependencies = [ "ansi_term", - "lazy_static", "matchers", + "once_cell", "regex", "serde", "serde_json", diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index ffafa08bfe..caa4cc4530 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -71,6 +71,30 @@ There is now a separate subcommand for config related operations: ## 🚀 Features +### Add configuration for logging and add more logs + +By default some logs containing sensible data (like request body, response body, headers) are not displayed even if we set the right log level. +For example if you need to display raw responses from one of your subgraph it won't be displayed by default. To enable them you have to configure it thanks to the `when_header` setting in the new section `experimental_logging`. It let's you set different headers to enable more logs (request/response headers/body for supergraph and subgraphs) when the request contains these headers with corresponding values/regex. +Here is an example how you can configure it: + +```yaml title="router.yaml" +telemetry: + experimental_logging: + format: json # By default it's "pretty" if you are in an interactive shell session + display_filename: true # Display filename where the log is coming from. Default: true + display_line_number: false # Display line number in the file where the log is coming from. Default: true + # If one of these headers matches we will log supergraph and subgraphs requests/responses + when_header: + - name: apollo-router-log-request + value: my_client + headers: true # default: false + body: true # default: false + # log request for all requests/responses headers coming from Iphones + - name: user-agent + match: ^Mozilla/5.0 (iPhone* + headers: true +``` + ### Provide multi-arch (amd64/arm64) Docker images for the Router ([Issue #1932](https://github.com/apollographql/router/pull/2138)) From the next release, our Docker images will be multi-arch. diff --git a/apollo-router-scaffold/templates/base/Cargo.toml b/apollo-router-scaffold/templates/base/Cargo.toml index c85a618ebc..dcfc7a04e2 100644 --- a/apollo-router-scaffold/templates/base/Cargo.toml +++ b/apollo-router-scaffold/templates/base/Cargo.toml @@ -32,7 +32,7 @@ serde = "1.0.136" serde_json = "1.0.79" tokio = { version = "1.17.0", features = ["full"] } tower = { version = "0.4.12", features = ["full"] } -tracing = "=0.1.34" +tracing = "0.1.35" # this makes build scripts and proc macros faster to compile [profile.dev.build-override] diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 7604086531..cd367acb24 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -183,20 +183,12 @@ tower-http = { version = "0.3.4", features = [ "timeout", ] } tower-service = "0.3.2" - -# We are constrained in our ability to update the `tracing` packages, which is -# tracked in https://github.com/apollographql/router/issues/1407. -# -# To codify this with code, a rule in our Renovate configuration constraints and -# groups `^tracing` and `^opentelemetry*` dependencies together as of -# https://github.com/apollographql/router/pull/1509. A comment which exists -# there (and above around the `opentelemetry` packages) should this ever change. -# There is also a note below on the `^tracing` dev-dependencies! 
-tracing = "=0.1.34" -tracing-core = "=0.1.26" +tracing = "0.1.35" +tracing-core = "0.1.28" tracing-futures = { version = "0.2.5", features = ["futures-03"] } tracing-opentelemetry = "0.17.4" -tracing-subscriber = { version = "0.3.11", features = ["env-filter", "json"] } +tracing-subscriber = { version = "0.3.14", features = ["env-filter", "json"] } + url = { version = "2.3.1", features = ["serde"] } urlencoding = "2.1.2" uuid = { version = "1.2.2", features = ["serde", "v4"] } diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 16795261dd..1d888d171d 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -787,6 +787,106 @@ expression: "&schema" "additionalProperties": false, "nullable": true }, + "experimental_logging": { + "type": "object", + "properties": { + "display_filename": { + "default": true, + "type": "boolean" + }, + "display_line_number": { + "default": true, + "type": "boolean" + }, + "format": { + "description": "Log format", + "oneOf": [ + { + "description": "Pretty text format (default if you're running from a tty)", + "type": "string", + "enum": [ + "pretty" + ] + }, + { + "description": "Json log format", + "type": "string", + "enum": [ + "json" + ] + } + ] + }, + "when_header": { + "description": "Log configuration to log request and response for subgraphs and supergraph", + "type": "array", + "items": { + "anyOf": [ + { + "description": "Match header value given a regex to display logs", + "type": "object", + "required": [ + "match", + "name" + ], + "properties": { + "body": { + "description": "Display request/response body (default: false)", + "default": false, + "type": "boolean" + }, + "headers": { + "description": "Display request/response headers (default: false)", + "default": false, + "type": "boolean" + }, + "match": { + "description": "Regex to match the header value", + "type": "string" + }, + "name": { + "description": "Header name", + "type": "string" + } + }, + "additionalProperties": false + }, + { + "description": "Match header value given a value to display logs", + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "body": { + "description": "Display request/response body (default: false)", + "default": false, + "type": "boolean" + }, + "headers": { + "description": "Display request/response headers (default: false)", + "default": false, + "type": "boolean" + }, + "name": { + "description": "Header name", + "type": "string" + }, + "value": { + "description": "Header value", + "type": "string" + } + }, + "additionalProperties": false + } + ] + } + } + }, + "additionalProperties": false, + "nullable": true + }, "metrics": { "type": "object", "properties": { diff --git a/apollo-router/src/context.rs b/apollo-router/src/context.rs index 21c7f839b5..d726c4d4fa 100644 --- a/apollo-router/src/context.rs +++ b/apollo-router/src/context.rs @@ -47,6 +47,14 @@ impl Context { } impl Context { + /// Returns true if the context contains a value for the specified key. + pub fn contains_key(&self, key: K) -> bool + where + K: Into, + { + self.entries.contains_key(&key.into()) + } + /// Get a value from the context using the provided key. 
/// /// Semantics: diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs index 9cbcc182c9..920895ce10 100644 --- a/apollo-router/src/plugins/telemetry/config.rs +++ b/apollo-router/src/plugins/telemetry/config.rs @@ -6,11 +6,14 @@ use opentelemetry::sdk::Resource; use opentelemetry::Array; use opentelemetry::KeyValue; use opentelemetry::Value; +use regex::Regex; use schemars::JsonSchema; use serde::Deserialize; use super::metrics::MetricsAttributesConf; use super::*; +use crate::configuration::ConfigurationError; +use crate::plugin::serde::deserialize_regex; use crate::plugins::telemetry::metrics; #[derive(thiserror::Error, Debug)] @@ -46,7 +49,8 @@ impl GenericWith for T where Self: Sized {} #[derive(Clone, Default, Debug, Deserialize, JsonSchema)] #[serde(deny_unknown_fields, rename_all = "snake_case")] pub struct Conf { - #[allow(dead_code)] + #[serde(rename = "experimental_logging")] + pub(crate) logging: Option, pub(crate) metrics: Option, pub(crate) tracing: Option, pub(crate) apollo: Option, @@ -86,6 +90,162 @@ pub(crate) struct Tracing { pub(crate) datadog: Option, } +#[derive(Clone, Default, Debug, Deserialize, JsonSchema)] +#[serde(deny_unknown_fields)] +pub(crate) struct Logging { + /// Log format + #[serde(default)] + pub(crate) format: LoggingFormat, + #[serde(default = "default_display_filename")] + pub(crate) display_filename: bool, + #[serde(default = "default_display_line_number")] + pub(crate) display_line_number: bool, + /// Log configuration to log request and response for subgraphs and supergraph + #[serde(default)] + pub(crate) when_header: Vec, +} + +pub(crate) const fn default_display_filename() -> bool { + true +} + +pub(crate) const fn default_display_line_number() -> bool { + true +} + +impl Logging { + pub(crate) fn validate(&self) -> Result<(), ConfigurationError> { + let misconfiguration = self.when_header.iter().any(|cfg| match cfg { + HeaderLoggingCondition::Matching { headers, body, .. } + | HeaderLoggingCondition::Value { headers, body, .. 
} => !body && !headers, + }); + + if misconfiguration { + Err(ConfigurationError::InvalidConfiguration { + message: "'when_header' configuration for logging is invalid", + error: String::from( + "body and headers must not be both false because it doesn't enable any logs", + ), + }) + } else { + Ok(()) + } + } + + /// Returns if we should display the request/response headers and body given the `SupergraphRequest` + pub(crate) fn should_log(&self, req: &SupergraphRequest) -> (bool, bool) { + self.when_header + .iter() + .fold((false, false), |(log_headers, log_body), current| { + let (current_log_headers, current_log_body) = current.should_log(req); + ( + log_headers || current_log_headers, + log_body || current_log_body, + ) + }) + } +} + +#[derive(Clone, Debug, Deserialize, JsonSchema)] +#[serde(untagged, deny_unknown_fields, rename_all = "snake_case")] +pub(crate) enum HeaderLoggingCondition { + /// Match header value given a regex to display logs + Matching { + /// Header name + name: String, + /// Regex to match the header value + #[schemars(schema_with = "string_schema", rename = "match")] + #[serde(deserialize_with = "deserialize_regex", rename = "match")] + matching: Regex, + /// Display request/response headers (default: false) + #[serde(default)] + headers: bool, + /// Display request/response body (default: false) + #[serde(default)] + body: bool, + }, + /// Match header value given a value to display logs + Value { + /// Header name + name: String, + /// Header value + value: String, + /// Display request/response headers (default: false) + #[serde(default)] + headers: bool, + /// Display request/response body (default: false) + #[serde(default)] + body: bool, + }, +} + +impl HeaderLoggingCondition { + /// Returns if we should display the request/response headers and body given the `SupergraphRequest` + pub(crate) fn should_log(&self, req: &SupergraphRequest) -> (bool, bool) { + match self { + HeaderLoggingCondition::Matching { + name, + matching: matched, + headers, + body, + } => { + let header_match = req + .supergraph_request + .headers() + .get(name) + .and_then(|h| h.to_str().ok()) + .map(|h| matched.is_match(h)) + .unwrap_or_default(); + + if header_match { + (*headers, *body) + } else { + (false, false) + } + } + HeaderLoggingCondition::Value { + name, + value, + headers, + body, + } => { + let header_match = req + .supergraph_request + .headers() + .get(name) + .and_then(|h| h.to_str().ok()) + .map(|h| value.as_str() == h) + .unwrap_or_default(); + + if header_match { + (*headers, *body) + } else { + (false, false) + } + } + } + } +} + +#[derive(Clone, Debug, Deserialize, JsonSchema, Copy)] +#[serde(deny_unknown_fields, rename_all = "snake_case")] +pub(crate) enum LoggingFormat { + /// Pretty text format (default if you're running from a tty) + Pretty, + /// Json log format + Json, +} + +impl Default for LoggingFormat { + fn default() -> Self { + if atty::is(atty::Stream::Stdout) { + Self::Pretty + } else { + Self::Json + } + } +} + #[derive(Clone, Default, Debug, Deserialize, JsonSchema)] #[serde(deny_unknown_fields, rename_all = "snake_case")] pub(crate) struct Propagation { @@ -333,3 +493,112 @@ impl Conf { ) } } + +fn string_schema(gen: &mut schemars::gen::SchemaGenerator) -> schemars::schema::Schema { + String::json_schema(gen) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_logging_conf_validation() { + let logging_conf = Logging { + format: LoggingFormat::default(), + display_filename: false, + display_line_number: false, + when_header: 
vec![HeaderLoggingCondition::Value { + name: "test".to_string(), + value: String::new(), + headers: true, + body: false, + }], + }; + + logging_conf.validate().unwrap(); + + let logging_conf = Logging { + format: LoggingFormat::default(), + display_filename: false, + display_line_number: false, + when_header: vec![HeaderLoggingCondition::Value { + name: "test".to_string(), + value: String::new(), + headers: false, + body: false, + }], + }; + + let validate_res = logging_conf.validate(); + assert!(validate_res.is_err()); + assert_eq!(validate_res.unwrap_err().to_string(), "'when_header' configuration for logging is invalid: body and headers must not be both false because it doesn't enable any logs"); + } + + #[test] + fn test_logging_conf_should_log() { + let logging_conf = Logging { + format: LoggingFormat::default(), + display_filename: false, + display_line_number: false, + when_header: vec![HeaderLoggingCondition::Matching { + name: "test".to_string(), + matching: Regex::new("^foo*").unwrap(), + headers: true, + body: false, + }], + }; + let req = SupergraphRequest::fake_builder() + .header("test", "foobar") + .build() + .unwrap(); + assert_eq!(logging_conf.should_log(&req), (true, false)); + + let logging_conf = Logging { + format: LoggingFormat::default(), + display_filename: false, + display_line_number: false, + when_header: vec![HeaderLoggingCondition::Value { + name: "test".to_string(), + value: String::from("foobar"), + headers: true, + body: false, + }], + }; + assert_eq!(logging_conf.should_log(&req), (true, false)); + + let logging_conf = Logging { + format: LoggingFormat::default(), + display_filename: false, + display_line_number: false, + when_header: vec![ + HeaderLoggingCondition::Matching { + name: "test".to_string(), + matching: Regex::new("^foo*").unwrap(), + headers: true, + body: false, + }, + HeaderLoggingCondition::Matching { + name: "test".to_string(), + matching: Regex::new("^*bar$").unwrap(), + headers: false, + body: true, + }, + ], + }; + assert_eq!(logging_conf.should_log(&req), (true, true)); + + let logging_conf = Logging { + format: LoggingFormat::default(), + display_filename: false, + display_line_number: false, + when_header: vec![HeaderLoggingCondition::Matching { + name: "testtest".to_string(), + matching: Regex::new("^foo*").unwrap(), + headers: true, + body: false, + }], + }; + assert_eq!(logging_conf.should_log(&req), (false, false)); + } +} diff --git a/apollo-router/src/plugins/telemetry/metrics/apollo.rs b/apollo-router/src/plugins/telemetry/metrics/apollo.rs index d19d82a2ef..df97556df6 100644 --- a/apollo-router/src/plugins/telemetry/metrics/apollo.rs +++ b/apollo-router/src/plugins/telemetry/metrics/apollo.rs @@ -240,6 +240,7 @@ mod test { ) -> Result { Telemetry::new(PluginInit::new( config::Conf { + logging: None, metrics: None, tracing: None, apollo: Some(apollo_config), diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index d9d7566520..cebd05f7b1 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -64,6 +64,8 @@ use crate::layers::ServiceBuilderExt; use crate::plugin::Plugin; use crate::plugin::PluginInit; use crate::plugins::telemetry::apollo::ForwardHeaders; +use crate::plugins::telemetry::config::default_display_filename; +use crate::plugins::telemetry::config::default_display_line_number; use crate::plugins::telemetry::config::MetricsCommon; use crate::plugins::telemetry::config::Trace; use 
crate::plugins::telemetry::formatters::JsonFields; @@ -76,6 +78,7 @@ use crate::plugins::telemetry::metrics::BasicMetrics; use crate::plugins::telemetry::metrics::MetricsBuilder; use crate::plugins::telemetry::metrics::MetricsConfigurator; use crate::plugins::telemetry::metrics::MetricsExporterHandle; +use crate::plugins::telemetry::tracing::apollo_telemetry::APOLLO_PRIVATE_OPERATION_SIGNATURE; use crate::plugins::telemetry::tracing::TracingConfigurator; use crate::query_planner::USAGE_REPORTING; use crate::register_plugin; @@ -102,6 +105,7 @@ pub(crate) mod formatters; mod metrics; mod otlp; mod tracing; +// Tracing consts pub(crate) const SUPERGRAPH_SPAN_NAME: &str = "supergraph"; pub(crate) const SUBGRAPH_SPAN_NAME: &str = "subgraph"; const CLIENT_NAME: &str = "apollo_telemetry::client_name"; @@ -110,6 +114,8 @@ const ATTRIBUTES: &str = "apollo_telemetry::metrics_attributes"; const SUBGRAPH_ATTRIBUTES: &str = "apollo_telemetry::subgraph_metrics_attributes"; pub(crate) const STUDIO_EXCLUDE: &str = "apollo_telemetry::studio::exclude"; pub(crate) const FTV1_DO_NOT_SAMPLE: &str = "apollo_telemetry::studio::ftv1_do_not_sample"; +pub(crate) const LOGGING_DISPLAY_HEADERS: &str = "apollo_telemetry::logging::display_headers"; +pub(crate) const LOGGING_DISPLAY_BODY: &str = "apollo_telemetry::logging::display_body"; const DEFAULT_SERVICE_NAME: &str = "apollo-router"; static TELEMETRY_LOADED: OnceCell = OnceCell::new(); @@ -197,11 +203,20 @@ impl Plugin for Telemetry { { // Record the operation signature on the router span Span::current().record( - "apollo_private.operation_signature", + APOLLO_PRIVATE_OPERATION_SIGNATURE.as_str(), &usage_reporting.stats_report_key.as_str(), ); } - resp + if resp.context.contains_key(LOGGING_DISPLAY_HEADERS) { + ::tracing::info!(http.response.headers = ?resp.response.headers(), "Supergraph response headers"); + } + let display_body = resp.context.contains_key(LOGGING_DISPLAY_BODY); + resp.map_stream(move |gql_response| { + if display_body { + ::tracing::info!(http.response.body = ?gql_response, "Supergraph GraphQL response"); + } + gql_response + }) }) .map_future_with_request_data( move |req: &SupergraphRequest| { @@ -381,6 +396,9 @@ impl Telemetry { _ => None, }; + if let Some(logging_conf) = &config.logging { + logging_conf.validate()?; + } // Setup metrics // The act of setting up metrics will overwrite a global meter. However it is essential that // we use the aggregate meter provider that is created below. 
It enables us to support @@ -428,8 +446,20 @@ impl Telemetry { EnvFilter::try_new(log_level) .context("could not parse log configuration")?, ) - .with_file(true) - .with_line_number(true); + .with_file( + config + .logging + .as_ref() + .map(|l| l.display_filename) + .unwrap_or(default_display_filename()), + ) + .with_line_number( + config + .logging + .as_ref() + .map(|l| l.display_line_number) + .unwrap_or(default_display_line_number()), + ); if let Some(sub) = subscriber { let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); @@ -437,33 +467,43 @@ impl Telemetry { if let Err(e) = set_global_default(subscriber) { ::tracing::error!("cannot set global subscriber: {:?}", e); } - } else if atty::is(atty::Stream::Stdout) { - let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); - - let subscriber = sub_builder - .event_format(formatters::TextFormatter::new()) - .finish() - .with(telemetry); - if let Err(e) = set_global_default(subscriber) { - ::tracing::error!("cannot set global subscriber: {:?}", e); - } } else { - let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); - - let subscriber = sub_builder - .map_event_format(|e| { - e.json() - .with_current_span(true) - .with_span_list(true) - .flatten_event(true) - }) - .map_fmt_fields(|_f| JsonFields::new()) - .finish() - .with(telemetry); - if let Err(e) = set_global_default(subscriber) { - ::tracing::error!("cannot set global subscriber: {:?}", e); + match config + .logging + .as_ref() + .map(|l| l.format) + .unwrap_or_default() + { + config::LoggingFormat::Pretty => { + let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); + + let subscriber = sub_builder + .event_format(formatters::TextFormatter::new()) + .finish() + .with(telemetry); + if let Err(e) = set_global_default(subscriber) { + ::tracing::error!("cannot set global subscriber: {:?}", e); + } + } + config::LoggingFormat::Json => { + let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); + + let subscriber = sub_builder + .map_event_format(|e| { + e.json() + .with_current_span(true) + .with_span_list(true) + .flatten_event(true) + }) + .map_fmt_fields(|_f| JsonFields::new()) + .finish() + .with(telemetry); + if let Err(e) = set_global_default(subscriber) { + ::tracing::error!("cannot set global subscriber: {:?}", e); + } + } } - }; + } } Ok(true) @@ -829,6 +869,22 @@ impl Telemetry { .unwrap_or_default() .to_string(), ); + let (should_log_headers, should_log_body) = config + .logging + .as_ref() + .map(|cfg| cfg.should_log(req)) + .unwrap_or_default(); + if should_log_headers { + ::tracing::info!(http.request.headers = ?req.supergraph_request.headers(), "Supergraph request headers"); + + let _ = req.context.insert(LOGGING_DISPLAY_HEADERS, true); + } + if should_log_body { + ::tracing::info!(http.request.body = ?req.supergraph_request.body(), "Supergraph request body"); + + let _ = req.context.insert(LOGGING_DISPLAY_BODY, true); + } + if let Some(metrics_conf) = &config.metrics { // List of custom attributes for metrics let mut attributes: HashMap = HashMap::new(); diff --git a/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs b/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs index 41800f6578..39affe53de 100644 --- a/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs +++ b/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs @@ -49,7 +49,7 @@ const APOLLO_PRIVATE_GRAPHQL_VARIABLES: Key = Key::from_static_str("apollo_private.graphql.variables"); const 
APOLLO_PRIVATE_HTTP_REQUEST_HEADERS: Key = Key::from_static_str("apollo_private.http.request_headers"); -const APOLLO_PRIVATE_OPERATION_SIGNATURE: Key = +pub(crate) const APOLLO_PRIVATE_OPERATION_SIGNATURE: Key = Key::from_static_str("apollo_private.operation_signature"); const APOLLO_PRIVATE_FTV1: Key = Key::from_static_str("apollo_private.ftv1"); const APOLLO_PRIVATE_PATH: Key = Key::from_static_str("apollo_private.path"); diff --git a/apollo-router/src/services/subgraph_service.rs b/apollo-router/src/services/subgraph_service.rs index 1df74aecab..e9137ce55b 100644 --- a/apollo-router/src/services/subgraph_service.rs +++ b/apollo-router/src/services/subgraph_service.rs @@ -39,6 +39,8 @@ use crate::axum_factory::utils::APPLICATION_JSON_HEADER_VALUE; use crate::axum_factory::utils::GRAPHQL_JSON_RESPONSE_HEADER_VALUE; use crate::error::FetchError; use crate::graphql; +use crate::plugins::telemetry::LOGGING_DISPLAY_BODY; +use crate::plugins::telemetry::LOGGING_DISPLAY_HEADERS; #[derive(PartialEq, Debug, Clone, Deserialize, JsonSchema, Copy)] #[serde(rename_all = "lowercase")] @@ -115,7 +117,6 @@ impl tower::Service for SubgraphService { Box::pin(async move { let (parts, body) = subgraph_request.into_parts(); - let body = serde_json::to_string(&body).expect("JSON serialization should not fail"); let compressed_body = compress(body, &parts.headers) @@ -142,7 +143,7 @@ impl tower::Service for SubgraphService { propagator.inject_context( &Span::current().context(), &mut opentelemetry_http::HeaderInjector(request.headers_mut()), - ) + ); }); let schema_uri = request.uri(); @@ -157,6 +158,15 @@ impl tower::Service for SubgraphService { 0 } }); + let display_headers = context.contains_key(LOGGING_DISPLAY_HEADERS); + let display_body = context.contains_key(LOGGING_DISPLAY_BODY); + if display_headers { + tracing::info!(http.request.headers = ?request.headers(), apollo.subgraph.name = %service_name, "Request headers to subgraph {service_name:?}"); + } + if display_body { + tracing::info!(http.request.body = ?request.body(), apollo.subgraph.name = %service_name, "Request body to subgraph {service_name:?}"); + } + let path = schema_uri.path().to_string(); let response = client .call(request) @@ -177,9 +187,13 @@ impl tower::Service for SubgraphService { reason: err.to_string(), } })?; - // Keep our parts, we'll need them later let (parts, body) = response.into_parts(); + if display_headers { + tracing::info!( + http.response.headers = ?parts.headers, apollo.subgraph.name = %service_name, "Response headers from subgraph {service_name:?}" + ); + } if let Some(content_type) = parts.headers.get(header::CONTENT_TYPE) { if let Ok(content_type_str) = content_type.to_str() { // Using .contains because sometimes we could have charset included (example: "application/json; charset=utf-8") @@ -217,6 +231,12 @@ impl tower::Service for SubgraphService { } })?; + if display_body { + tracing::info!( + http.response.body = %String::from_utf8_lossy(&body), apollo.subgraph.name = %service_name, "Raw response body from subgraph {service_name:?} received" + ); + } + let graphql: graphql::Response = tracing::debug_span!("parse_subgraph_response") .in_scope(|| { graphql::Response::from_bytes(&service_name, body).map_err(|error| { diff --git a/apollo-router/src/testdata/datadog.router.yaml b/apollo-router/src/testdata/datadog.router.yaml index 0e9e98346d..46c95a7cd7 100644 --- a/apollo-router/src/testdata/datadog.router.yaml +++ b/apollo-router/src/testdata/datadog.router.yaml @@ -3,4 +3,4 @@ telemetry: trace_config: 
service_name: router datadog: - endpoint: default + endpoint: default \ No newline at end of file diff --git a/apollo-router/src/testdata/jaeger.router.yaml b/apollo-router/src/testdata/jaeger.router.yaml index 641e32ca47..3741c66d64 100644 --- a/apollo-router/src/testdata/jaeger.router.yaml +++ b/apollo-router/src/testdata/jaeger.router.yaml @@ -1,10 +1,24 @@ telemetry: tracing: + propagation: + jaeger: true trace_config: service_name: router jaeger: scheduled_delay: 100ms agent: endpoint: default + experimental_logging: + when_header: + - name: apollo-router-log-request + value: test + headers: true # default: false + body: true # default: false + # log request for all requests coming from Iphones + - name: custom-header + match: ^foo.* + headers: true override_subgraph_url: - products: http://localhost:4005 \ No newline at end of file + products: http://localhost:4005 +include_subgraph_errors: + all: true diff --git a/apollo-router/src/testdata/otlp.router.yaml b/apollo-router/src/testdata/otlp.router.yaml index cbfa7ad982..93ef400426 100644 --- a/apollo-router/src/testdata/otlp.router.yaml +++ b/apollo-router/src/testdata/otlp.router.yaml @@ -3,4 +3,4 @@ telemetry: trace_config: service_name: router otlp: - endpoint: default + endpoint: default \ No newline at end of file diff --git a/apollo-router/src/testdata/zipkin.router.yaml b/apollo-router/src/testdata/zipkin.router.yaml index 90cef738c6..a745c863bf 100644 --- a/apollo-router/src/testdata/zipkin.router.yaml +++ b/apollo-router/src/testdata/zipkin.router.yaml @@ -3,4 +3,4 @@ telemetry: trace_config: service_name: router zipkin: - endpoint: default + endpoint: default \ No newline at end of file diff --git a/apollo-router/tests/common.rs b/apollo-router/tests/common.rs index 609b4b607a..0594e3bf90 100644 --- a/apollo-router/tests/common.rs +++ b/apollo-router/tests/common.rs @@ -109,7 +109,7 @@ impl TracingTest { propagator.inject_context( &span.context(), &mut opentelemetry_http::HeaderInjector(request.headers_mut()), - ) + ); }); request.headers_mut().remove(ACCEPT); match client.send(request).await { @@ -121,7 +121,7 @@ impl TracingTest { return id; } Err(e) => { - tracing::debug!("query failed: {}", e); + eprintln!("query failed: {}", e); } } tokio::time::sleep(Duration::from_millis(100)).await; diff --git a/apollo-router/tests/integration_tests.rs b/apollo-router/tests/integration_tests.rs index 98c131af52..04b2ff4990 100644 --- a/apollo-router/tests/integration_tests.rs +++ b/apollo-router/tests/integration_tests.rs @@ -986,6 +986,12 @@ fn redact_dynamic() -> Redaction { { return Content::Seq(vec![value_slice.get(0).unwrap().clone(), Content::I64(0)]); } + if value_slice.get(0).and_then(|v| v.as_str()) == Some("response_headers") { + return Content::Seq(vec![ + value_slice.get(0).unwrap().clone(), + Content::String("[REDACTED]".to_string()), + ]); + } } value }) diff --git a/apollo-router/tests/jaeger_test.rs b/apollo-router/tests/jaeger_test.rs index 6f7335c008..3caac2ef6f 100644 --- a/apollo-router/tests/jaeger_test.rs +++ b/apollo-router/tests/jaeger_test.rs @@ -273,7 +273,5 @@ async fn subgraph() { let make_svc = make_service_fn(|_conn| async { Ok::<_, Infallible>(service_fn(handle)) }); let server = Server::bind(&SocketAddr::from(([127, 0, 0, 1], 4005))).serve(make_svc); - if let Err(e) = server.await { - eprintln!("server error: {}", e); - } + server.await.unwrap(); } diff --git a/apollo-router/tests/logging_test.rs b/apollo-router/tests/logging_test.rs new file mode 100644 index 0000000000..20fb5c93b9 --- /dev/null +++ 
b/apollo-router/tests/logging_test.rs @@ -0,0 +1,197 @@ +use std::sync::Arc; +use std::sync::Mutex; + +use apollo_router::_private::TelemetryPlugin; +use apollo_router::graphql; +use apollo_router::services::supergraph; +use tower::ServiceExt; +use tracing::field; +use tracing::Level; +use tracing::Metadata; +use tracing::Subscriber; +use tracing_subscriber::registry::LookupSpan; +use tracing_subscriber::Registry; + +struct TestLogSubscriber { + registry: Registry, + event_metadata: Arc>>>, +} + +impl Subscriber for TestLogSubscriber { + fn enabled(&self, _metadata: &Metadata<'_>) -> bool { + true + } + + fn new_span(&self, span: &tracing_core::span::Attributes<'_>) -> tracing_core::span::Id { + self.registry.new_span(span) + } + + fn record(&self, span: &tracing_core::span::Id, values: &tracing_core::span::Record<'_>) { + self.registry.record(span, values) + } + + fn record_follows_from(&self, span: &tracing_core::span::Id, follows: &tracing_core::span::Id) { + self.registry.record_follows_from(span, follows) + } + + fn event(&self, event: &tracing::Event<'_>) { + if event.metadata().target().starts_with("apollo_router") + && event.metadata().level() == &Level::INFO + { + self.event_metadata.lock().unwrap().push(event.metadata()); + } + } + + fn enter(&self, span: &tracing_core::span::Id) { + self.registry.enter(span) + } + + fn exit(&self, span: &tracing_core::span::Id) { + self.registry.exit(span) + } +} + +impl<'a> LookupSpan<'a> for TestLogSubscriber { + type Data = tracing_subscriber::registry::Data<'a>; + + fn span_data(&'a self, id: &tracing::Id) -> Option { + self.registry.span_data(id) + } +} + +async fn setup_router( + config: serde_json::Value, + logging_config: serde_json::Value, + subscriber: TestLogSubscriber, +) -> supergraph::BoxCloneService { + let telemetry = TelemetryPlugin::new_with_subscriber( + serde_json::json!({ + "tracing": {}, + "experimental_logging": logging_config, + "apollo": { + "schema_id": "" + } + }), + subscriber, + ) + .await + .unwrap(); + + apollo_router::TestHarness::builder() + .with_subgraph_network_requests() + .configuration_json(config) + .unwrap() + .schema(include_str!("fixtures/supergraph.graphql")) + .extra_plugin(telemetry) + .build() + .await + .unwrap() +} + +async fn query_with_router( + router: supergraph::BoxCloneService, + request: supergraph::Request, +) -> graphql::Response { + router + .oneshot(request) + .await + .unwrap() + .next_response() + .await + .unwrap() +} + +#[derive(Default, Clone, PartialEq, Debug)] +struct LoggingCount { + supergraph_request_headers_count: usize, + supergraph_request_body_count: usize, + supergraph_response_body_count: usize, + supergraph_response_headers_count: usize, + subgraph_request_body_count: usize, + subgraph_request_headers_count: usize, + subgraph_response_body_count: usize, + subgraph_response_headers_count: usize, +} + +impl LoggingCount { + const RESPONSE_BODY: &'static str = "http.response.body"; + const RESPONSE_HEADERS: &'static str = "http.response.headers"; + const REQUEST_HEADERS: &'static str = "http.request.headers"; + const REQUEST_BODY: &'static str = "http.request.body"; + + fn count(&mut self, fields: &field::FieldSet) { + let fields_name: Vec<&str> = fields.iter().map(|f| f.name()).collect(); + if fields_name.contains(&"apollo.subgraph.name") { + if fields_name.contains(&Self::RESPONSE_BODY) { + self.subgraph_response_body_count += 1; + } + if fields_name.contains(&Self::RESPONSE_HEADERS) { + self.subgraph_response_headers_count += 1; + } + if 
fields_name.contains(&Self::REQUEST_HEADERS) { + self.subgraph_request_headers_count += 1; + } + if fields_name.contains(&Self::REQUEST_BODY) { + self.subgraph_request_body_count += 1; + } + } else { + if fields_name.contains(&Self::RESPONSE_BODY) { + self.supergraph_response_body_count += 1; + } + if fields_name.contains(&Self::RESPONSE_HEADERS) { + self.supergraph_response_headers_count += 1; + } + if fields_name.contains(&Self::REQUEST_HEADERS) { + self.supergraph_request_headers_count += 1; + } + if fields_name.contains(&Self::REQUEST_BODY) { + self.supergraph_request_body_count += 1; + } + } + } +} + +#[tokio::test(flavor = "multi_thread")] +async fn simple_query_should_display_logs_for_subgraph_and_supergraph() { + let logging_config = serde_json::json!({ + "when_header": [{ + "name": "custom-header", + "match": "^foo.*", + "headers": true + }] + }); + let request = supergraph::Request::fake_builder() + .header("custom-header", "foobar") + .query(r#"{ topProducts { upc name reviews {id product { name } author { id name } } } }"#) + .variable("topProductsFirst", 2_i32) + .variable("reviewsForAuthorAuthorId", 1_i32) + .build() + .expect("expecting valid request"); + + let event_store = Arc::new(Mutex::new(Vec::new())); + let router = setup_router( + serde_json::json!({}), + logging_config, + TestLogSubscriber { + event_metadata: event_store.clone(), + registry: Registry::default(), + }, + ) + .await; + let actual = query_with_router(router, request).await; + + assert_eq!(0, actual.errors.len()); + let mut logging_count = LoggingCount::default(); + for event in &*event_store.lock().unwrap() { + logging_count.count(event.fields()); + } + + assert_eq!(logging_count.supergraph_request_headers_count, 1); + assert_eq!(logging_count.supergraph_request_body_count, 0); + assert_eq!(logging_count.supergraph_response_body_count, 0); + assert_eq!(logging_count.supergraph_response_headers_count, 1); + assert_eq!(logging_count.subgraph_response_body_count, 0); + assert_eq!(logging_count.subgraph_response_headers_count, 4); + assert_eq!(logging_count.subgraph_request_headers_count, 4); + assert_eq!(logging_count.subgraph_request_body_count, 0); +} diff --git a/docs/source/configuration/logging.mdx b/docs/source/configuration/logging.mdx index ec9d47e2d9..832cdccddb 100644 --- a/docs/source/configuration/logging.mdx +++ b/docs/source/configuration/logging.mdx @@ -2,7 +2,7 @@ title: Logging in the Apollo Router --- -import { Link } from 'gatsby'; +import { Link } from "gatsby"; The Apollo Router accepts a command-line argument to set its log level: @@ -40,10 +40,10 @@ The router also accepts an `APOLLO_ROUTER_LOG` environment variable with the sam The Apollo Router supports two logging formats: -* [Basic logging](#basic-logging-via-shell), primarily for local development -* [JSON-formatted logging](#json-formatted-logging), for compatibility with searchable logging tools like Google Cloud Logging +- [Basic logging](#basic-logging-via-shell), primarily for local development +- [JSON-formatted logging](#json-formatted-logging), for compatibility with searchable logging tools like Google Cloud Logging -The router uses basic logging whenever an interactive shell session is attached, and it uses JSON-formatted logging otherwise (e.g., in CI and deployed environments). +The router uses basic logging whenever an interactive shell session is attached, and it uses JSON-formatted logging otherwise (e.g., in CI and deployed environments). You can also enforce a specific format in configuration. 
### Basic logging via shell @@ -66,6 +66,33 @@ JSON-formatted logging provides compatibility with common searchable logging too {"timestamp":"2022-03-18T11:46:43.453993Z","level":"INFO","fields":{"message":"Stopped"},"target":"apollo_router"} ``` +## Basic configuration + +> This is part of an experimental feature, which means that until it is stabilized (and the `experimental_` prefix is removed) we might change the configuration shape or add/remove features. +> If you want to give feedback or participate in shaping that feature, feel free to join [this discussion on GitHub](https://github.com/apollographql/router/discussions/1961). + +By default, logs that contain sensitive data (like request bodies, response bodies, or headers) are not displayed, even at the appropriate log level. +For example, raw responses from your subgraphs are not logged by default. To enable these logs, configure the `when_header` setting. +Here is an example of how to configure it: + +```yaml title="router.yaml" +telemetry: + experimental_logging: + format: json # By default it's "pretty" if you are in an interactive shell session + display_filename: true # Display filename where the log is coming from. Default: true + display_line_number: false # Display line number in the file where the log is coming from. Default: true + # If one of these headers matches, we log supergraph and subgraph requests/responses + when_header: + - name: apollo-router-log-request + value: my_client + headers: true # default: false + body: true # default: false + # Log requests coming from iPhones + - name: user-agent + match: ^Mozilla/5\.0 \(iPhone + headers: true +``` + ## Advanced configuration For more granular control over Apollo Router logging, see the [Env Logger documentation](https://docs.rs/env_logger/latest/env_logger/). diff --git a/licenses.html b/licenses.html index a670e5304b..96d6c76db9 100644 --- a/licenses.html +++ b/licenses.html @@ -44,8 +44,8 @@

Third Party Licenses

Overview of licenses:

  - • MIT License (74)
  - • Apache License 2.0 (52)
  + • MIT License (73)
  + • Apache License 2.0 (53)
  • ISC License (9)
  • BSD 3-Clause "New" or "Revised" License (7)
  • Mozilla Public License 2.0 (2)
  • @@ -9327,6 +9327,30 @@

    Used by:

    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + + +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    # Contributing
    +
    +## License
    +
    +Licensed under either of
    +
    + * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
    + * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
    +
    +at your option.
    +
    +### Contribution
    +
    +Unless you explicitly state otherwise, any contribution intentionally submitted
    +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
    +additional terms or conditions.
     
  • @@ -10743,40 +10767,6 @@

    Used by:

    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.Apache License - -
  • -
  • -

    MIT License

    -

    Used by:

    - -
    -Copyright (c) 2010 The Rust Project Developers
    -
    -Permission is hereby granted, free of charge, to any
    -person obtaining a copy of this software and associated
    -documentation files (the "Software"), to deal in the
    -Software without restriction, including without
    -limitation the rights to use, copy, modify, merge,
    -publish, distribute, sublicense, and/or sell copies of
    -the Software, and to permit persons to whom the Software
    -is furnished to do so, subject to the following
    -conditions:
    -
    -The above copyright notice and this permission notice
    -shall be included in all copies or substantial portions
    -of the Software.
    -
    -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
    -ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
    -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
    -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
    -SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
    -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
    -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
    -IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    -DEALINGS IN THE SOFTWARE.
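To illustrate the semantics of the `experimental_logging.when_header` conditions introduced in this patch (log when a named header either equals an exact `value` or matches a `match` regex), here is a minimal, self-contained sketch. It is not the router's implementation: the `HeaderCondition` type and `should_log` function are invented for illustration, and only the `http` and `regex` crates are assumed.

```rust
// Illustrative sketch of `when_header` matching semantics (not the router's code).
// A condition either requires an exact header value or a regex match on it.
use http::HeaderMap;
use regex::Regex;

enum HeaderCondition {
    Value { name: String, value: String },
    Match { name: String, regex: Regex },
}

fn header_value<'a>(headers: &'a HeaderMap, name: &str) -> Option<&'a str> {
    headers.get(name).and_then(|v| v.to_str().ok())
}

// Request/response logging is enabled as soon as any configured condition matches.
fn should_log(conditions: &[HeaderCondition], headers: &HeaderMap) -> bool {
    conditions.iter().any(|condition| match condition {
        HeaderCondition::Value { name, value } => {
            header_value(headers, name) == Some(value.as_str())
        }
        HeaderCondition::Match { name, regex } => {
            header_value(headers, name).map_or(false, |v| regex.is_match(v))
        }
    })
}

fn main() {
    let mut headers = HeaderMap::new();
    headers.insert("custom-header", "foobar".parse().unwrap());

    // Mirrors the `custom-header` / `match: ^foo.*` condition used in the logging test above.
    let conditions = vec![HeaderCondition::Match {
        name: "custom-header".to_string(),
        regex: Regex::new("^foo.*").unwrap(),
    }];

    assert!(should_log(&conditions, &headers));
}
```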
     
  • From 9da68900f3590eff3790d86e1ae8e17fe853291a Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Thu, 24 Nov 2022 16:41:33 +0100 Subject: [PATCH 22/45] handle mutations containing `@defer` (#2102) Fix #2099 to validate the primary part of a request with `@defer`, we reconstruct a partial query by walking through the query plan. The code responsible for that was not detecting mutations. while the PR in the current state fixes the issue, I'd like to refactor a bit so that we do not special case query and mutation (which will be annoying if we add subscription at some point), and moving the query reconstruction elsewhere, to work directly on the query instead of the query plan --- NEXT_CHANGELOG.md | 7 ++ apollo-router/src/query_planner/plan.rs | 36 +++++-- ..._service__tests__query_reconstruction.snap | 15 +++ .../src/services/supergraph_service.rs | 98 +++++++++++++++++++ 4 files changed, 147 insertions(+), 9 deletions(-) create mode 100644 apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__query_reconstruction.snap diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index caa4cc4530..8db12097fc 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -201,6 +201,13 @@ The error response will now contain the status code and status name. Example: `H By [@col](https://github.com/col) in https://github.com/apollographql/router/pull/2118 +### handle mutations containing @defer ([Issue #2099](https://github.com/apollographql/router/issues/2099)) + +The Router generates partial query shapes corresponding to the primary and deferred responses, +to validate the data sent back to the client. Those query shapes were invalid for mutations. + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2102 + ## 🛠 Maintenance ## 📚 Documentation diff --git a/apollo-router/src/query_planner/plan.rs b/apollo-router/src/query_planner/plan.rs index 8db166f31e..873b6d4593 100644 --- a/apollo-router/src/query_planner/plan.rs +++ b/apollo-router/src/query_planner/plan.rs @@ -194,7 +194,18 @@ impl PlanNode { // re-create full query with the right path // parse the subselection let mut subselections = HashMap::new(); - self.collect_subselections(schema, &Path::default(), &mut subselections)?; + let operation_kind = if self.contains_mutations() { + OperationKind::Mutation + } else { + OperationKind::Query + }; + + self.collect_subselections( + schema, + &Path::default(), + &operation_kind, + &mut subselections, + )?; Ok(subselections) } @@ -203,6 +214,7 @@ impl PlanNode { &self, schema: &Schema, initial_path: &Path, + kind: &OperationKind, subselections: &mut HashMap, ) -> Result<(), QueryPlannerError> { // re-create full query with the right path @@ -210,7 +222,7 @@ impl PlanNode { match self { Self::Sequence { nodes } | Self::Parallel { nodes } => { nodes.iter().try_fold(subselections, |subs, current| { - current.collect_subselections(schema, initial_path, subs)?; + current.collect_subselections(schema, initial_path, kind, subs)?; Ok::<_, QueryPlannerError>(subs) })?; @@ -218,12 +230,12 @@ impl PlanNode { } Self::Flatten(node) => { node.node - .collect_subselections(schema, initial_path, subselections) + .collect_subselections(schema, initial_path, kind, subselections) } Self::Defer { primary, deferred } => { let primary_path = initial_path.join(&primary.path.clone().unwrap_or_default()); if let Some(primary_subselection) = &primary.subselection { - let query = reconstruct_full_query(&primary_path, primary_subselection); + let query = 
reconstruct_full_query(&primary_path, kind, primary_subselection); // ----------------------- Parse --------------------------------- let sub_selection = Query::parse(&query, schema, &Default::default())?; // ----------------------- END Parse --------------------------------- @@ -239,7 +251,7 @@ impl PlanNode { deferred.iter().try_fold(subselections, |subs, current| { if let Some(subselection) = ¤t.subselection { - let query = reconstruct_full_query(¤t.path, subselection); + let query = reconstruct_full_query(¤t.path, kind, subselection); // ----------------------- Parse --------------------------------- let sub_selection = Query::parse(&query, schema, &Default::default())?; // ----------------------- END Parse --------------------------------- @@ -256,6 +268,7 @@ impl PlanNode { current_node.collect_subselections( schema, &initial_path.join(¤t.path), + kind, subs, )?; } @@ -271,10 +284,10 @@ impl PlanNode { .. } => { if let Some(node) = if_clause { - node.collect_subselections(schema, initial_path, subselections)?; + node.collect_subselections(schema, initial_path, kind, subselections)?; } if let Some(node) = else_clause { - node.collect_subselections(schema, initial_path, subselections)?; + node.collect_subselections(schema, initial_path, kind, subselections)?; } Ok(()) } @@ -324,9 +337,14 @@ impl PlanNode { } } -fn reconstruct_full_query(path: &Path, subselection: &str) -> String { - let mut query = String::new(); +fn reconstruct_full_query(path: &Path, kind: &OperationKind, subselection: &str) -> String { let mut len = 0; + let mut query = match kind { + OperationKind::Query => "query", + OperationKind::Mutation => "mutation", + OperationKind::Subscription => "subscription", + } + .to_string(); for path_elt in path.iter() { match path_elt { json_ext::PathElement::Flatten | json_ext::PathElement::Index(_) => {} diff --git a/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__query_reconstruction.snap b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__query_reconstruction.snap new file mode 100644 index 0000000000..6327709527 --- /dev/null +++ b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__query_reconstruction.snap @@ -0,0 +1,15 @@ +--- +source: apollo-router/src/services/supergraph_service.rs +expression: stream.next_response().await.unwrap() +--- +{ + "errors": [ + { + "message": "invalid type for variable: 'userId'", + "extensions": { + "type": "ValidationInvalidTypeVariable", + "name": "userId" + } + } + ] +} diff --git a/apollo-router/src/services/supergraph_service.rs b/apollo-router/src/services/supergraph_service.rs index 8c00aa8941..f02fbf335c 100644 --- a/apollo-router/src/services/supergraph_service.rs +++ b/apollo-router/src/services/supergraph_service.rs @@ -710,4 +710,102 @@ mod tests { insta::assert_json_snapshot!(stream.next_response().await.unwrap()); } + + #[tokio::test] + async fn query_reconstruction() { + let schema = r#"schema + @link(url: "https://specs.apollo.dev/link/v1.0") + @link(url: "https://specs.apollo.dev/join/v0.2", for: EXECUTION) + @link(url: "https://specs.apollo.dev/tag/v0.2") + @link(url: "https://specs.apollo.dev/inaccessible/v0.2", for: SECURITY) + { + query: Query + mutation: Mutation + } + + directive @inaccessible on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + + directive @join__field(graph: join__Graph!, requires: join__FieldSet, 
provides: join__FieldSet, type: String, external: Boolean, override: String, usedOverridden: Boolean) repeatable on FIELD_DEFINITION | INPUT_FIELD_DEFINITION + + directive @join__graph(name: String!, url: String!) on ENUM_VALUE + + directive @join__implements(graph: join__Graph!, interface: String!) repeatable on OBJECT | INTERFACE + + directive @join__type(graph: join__Graph!, key: join__FieldSet, extension: Boolean! = false, resolvable: Boolean! = true) repeatable on OBJECT | INTERFACE | UNION | ENUM | INPUT_OBJECT | SCALAR + + directive @link(url: String, as: String, for: link__Purpose, import: [link__Import]) repeatable on SCHEMA + + directive @tag(name: String!) repeatable on FIELD_DEFINITION | OBJECT | INTERFACE | UNION | ARGUMENT_DEFINITION | SCALAR | ENUM | ENUM_VALUE | INPUT_OBJECT | INPUT_FIELD_DEFINITION + + scalar join__FieldSet + + enum join__Graph { + PRODUCTS @join__graph(name: "products", url: "http://products:4000/graphql") + USERS @join__graph(name: "users", url: "http://users:4000/graphql") + } + + scalar link__Import + + enum link__Purpose { + SECURITY + EXECUTION + } + + type MakePaymentResult + @join__type(graph: USERS) + { + id: ID! + paymentStatus: PaymentStatus + } + + type Mutation + @join__type(graph: USERS) + { + makePayment(userId: ID!): MakePaymentResult! + } + + + type PaymentStatus + @join__type(graph: USERS) + { + id: ID! + } + + type Query + @join__type(graph: PRODUCTS) + @join__type(graph: USERS) + { + name: String + } + "#; + + let service = TestHarness::builder() + .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } })) + .unwrap() + .schema(schema) + .build() + .await + .unwrap(); + + let request = supergraph::Request::fake_builder() + .header("Accept", "multipart/mixed; deferSpec=20220824") + .query( + r#"mutation ($userId: ID!) { + makePayment(userId: $userId) { + id + ... @defer { + paymentStatus { + id + } + } + } + }"#, + ) + .build() + .unwrap(); + + let mut stream = service.oneshot(request).await.unwrap(); + + insta::assert_json_snapshot!(stream.next_response().await.unwrap()); + } } From c42f0d89bd11dc4777bbbf970576b956072ab434 Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Thu, 24 Nov 2022 16:43:01 +0100 Subject: [PATCH 23/45] refactor the APQ implementation (#2129) remove duplicated code --- NEXT_CHANGELOG.md | 9 ++ apollo-router/src/services/layers/apq.rs | 198 ++++++++--------------- 2 files changed, 80 insertions(+), 127 deletions(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 8db12097fc..c7624538af 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -209,6 +209,15 @@ to validate the data sent back to the client. Those query shapes were invalid fo By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2102 ## 🛠 Maintenance + + +### Refactor APQ ([PR #2129](https://github.com/apollographql/router/pull/2129)) + +Remove duplicated code. 
+ +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2129 + + ## 📚 Documentation ### Docs: Update cors match regex example ([Issue #2151](https://github.com/apollographql/router/issues/2151)) diff --git a/apollo-router/src/services/layers/apq.rs b/apollo-router/src/services/layers/apq.rs index 60a2088471..6f8f309876 100644 --- a/apollo-router/src/services/layers/apq.rs +++ b/apollo-router/src/services/layers/apq.rs @@ -50,69 +50,9 @@ impl APQLayer { pub(crate) async fn apq_request( &self, - mut request: SupergraphRequest, + request: SupergraphRequest, ) -> Result { - let maybe_query_hash: Option<(String, Vec)> = request - .supergraph_request - .body() - .extensions - .get("persistedQuery") - .and_then(|value| serde_json_bytes::from_value::(value.clone()).ok()) - .and_then(|persisted_query| { - hex::decode(persisted_query.sha256hash.as_bytes()) - .ok() - .map(|decoded| (persisted_query.sha256hash, decoded)) - }); - - let body_query = request.supergraph_request.body().query.clone(); - - match (maybe_query_hash, body_query) { - (Some((query_hash, query_hash_bytes)), Some(query)) => { - if query_matches_hash(query.as_str(), query_hash_bytes.as_slice()) { - tracing::trace!("apq: cache insert"); - let _ = request.context.insert("persisted_query_hit", false); - self.cache.insert(format!("apq|{query_hash}"), query).await; - } else { - tracing::warn!("apq: graphql request doesn't match provided sha256Hash"); - } - Ok(request) - } - (Some((apq_hash, _)), _) => { - if let Ok(cached_query) = - self.cache.get(&format!("apq|{apq_hash}")).await.get().await - { - let _ = request.context.insert("persisted_query_hit", true); - tracing::trace!("apq: cache hit"); - request.supergraph_request.body_mut().query = Some(cached_query); - Ok(request) - } else { - tracing::trace!("apq: cache miss"); - let errors = vec![crate::error::Error { - message: "PersistedQueryNotFound".to_string(), - locations: Default::default(), - path: Default::default(), - extensions: serde_json_bytes::from_value(json!({ - "code": "PERSISTED_QUERY_NOT_FOUND", - "exception": { - "stacktrace": [ - "PersistedQueryNotFoundError: PersistedQueryNotFound", - ], - }, - })) - .unwrap(), - }]; - let res = SupergraphResponse::builder() - .data(Value::default()) - .errors(errors) - .context(request.context) - .build() - .expect("response is valid"); - - Err(res) - } - } - _ => Ok(request), - } + apq_request(&self.cache, request).await } } @@ -136,73 +76,12 @@ where fn layer(&self, service: S) -> Self::Service { let cache = self.cache.clone(); AsyncCheckpointService::new( - move |mut req| { + move |request| { let cache = cache.clone(); Box::pin(async move { - let maybe_query_hash: Option<(String, Vec)> = req - .supergraph_request - .body() - .extensions - .get("persistedQuery") - .and_then(|value| { - serde_json_bytes::from_value::(value.clone()).ok() - }) - .and_then(|persisted_query| { - hex::decode(persisted_query.sha256hash.as_bytes()) - .ok() - .map(|decoded| (persisted_query.sha256hash, decoded)) - }); - - let body_query = req.supergraph_request.body().query.clone(); - - match (maybe_query_hash, body_query) { - (Some((query_hash, query_hash_bytes)), Some(query)) => { - if query_matches_hash(query.as_str(), query_hash_bytes.as_slice()) { - tracing::trace!("apq: cache insert"); - let _ = req.context.insert("persisted_query_hit", false); - cache.insert(format!("apq|{query_hash}"), query).await; - } else { - tracing::warn!( - "apq: graphql request doesn't match provided sha256Hash" - ); - } - 
Ok(ControlFlow::Continue(req)) - } - (Some((apq_hash, _)), _) => { - if let Ok(cached_query) = - cache.get(&format!("apq|{apq_hash}")).await.get().await - { - let _ = req.context.insert("persisted_query_hit", true); - tracing::trace!("apq: cache hit"); - req.supergraph_request.body_mut().query = Some(cached_query); - Ok(ControlFlow::Continue(req)) - } else { - tracing::trace!("apq: cache miss"); - let errors = vec![crate::error::Error { - message: "PersistedQueryNotFound".to_string(), - locations: Default::default(), - path: Default::default(), - extensions: serde_json_bytes::from_value(json!({ - "code": "PERSISTED_QUERY_NOT_FOUND", - "exception": { - "stacktrace": [ - "PersistedQueryNotFoundError: PersistedQueryNotFound", - ], - }, - })) - .unwrap(), - }]; - let res = SupergraphResponse::builder() - .data(Value::default()) - .errors(errors) - .context(req.context) - .build() - .expect("response is valid"); - - Ok(ControlFlow::Break(res)) - } - } - _ => Ok(ControlFlow::Continue(req)), + match apq_request(&cache, request).await { + Ok(request) => Ok(ControlFlow::Continue(request)), + Err(response) => Ok(ControlFlow::Break(response)), } }) as BoxFuture< @@ -221,6 +100,71 @@ where } } +pub(crate) async fn apq_request( + cache: &DeduplicatingCache, + mut request: SupergraphRequest, +) -> Result { + let maybe_query_hash: Option<(String, Vec)> = request + .supergraph_request + .body() + .extensions + .get("persistedQuery") + .and_then(|value| serde_json_bytes::from_value::(value.clone()).ok()) + .and_then(|persisted_query| { + hex::decode(persisted_query.sha256hash.as_bytes()) + .ok() + .map(|decoded| (persisted_query.sha256hash, decoded)) + }); + + let body_query = request.supergraph_request.body().query.clone(); + + match (maybe_query_hash, body_query) { + (Some((query_hash, query_hash_bytes)), Some(query)) => { + if query_matches_hash(query.as_str(), query_hash_bytes.as_slice()) { + tracing::trace!("apq: cache insert"); + let _ = request.context.insert("persisted_query_hit", false); + cache.insert(format!("apq|{query_hash}"), query).await; + } else { + tracing::warn!("apq: graphql request doesn't match provided sha256Hash"); + } + Ok(request) + } + (Some((apq_hash, _)), _) => { + if let Ok(cached_query) = cache.get(&format!("apq|{apq_hash}")).await.get().await { + let _ = request.context.insert("persisted_query_hit", true); + tracing::trace!("apq: cache hit"); + request.supergraph_request.body_mut().query = Some(cached_query); + Ok(request) + } else { + tracing::trace!("apq: cache miss"); + let errors = vec![crate::error::Error { + message: "PersistedQueryNotFound".to_string(), + locations: Default::default(), + path: Default::default(), + extensions: serde_json_bytes::from_value(json!({ + "code": "PERSISTED_QUERY_NOT_FOUND", + "exception": { + "stacktrace": [ + "PersistedQueryNotFoundError: PersistedQueryNotFound", + ], + }, + })) + .unwrap(), + }]; + let res = SupergraphResponse::builder() + .data(Value::default()) + .errors(errors) + .context(request.context) + .build() + .expect("response is valid"); + + Err(res) + } + } + _ => Ok(request), + } +} + fn query_matches_hash(query: &str, hash: &[u8]) -> bool { let mut digest = Sha256::new(); digest.update(query.as_bytes()); From 20391054dd3d4beb7b0cac57763fee8e7dfe08cb Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Thu, 24 Nov 2022 17:40:46 +0100 Subject: [PATCH 24/45] add configuration for trace_id (#2131) close #2080 Configuration example: ```yaml telemetry: tracing: experimental_response_trace_id: enabled: true # default: false 
header_name: "my-trace-id" # default: "apollo-trace-id" propagation: request: header_name: "x-request-id" jaeger: true ``` Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> Co-authored-by: Geoffroy Couprie --- NEXT_CHANGELOG.md | 20 ++++ apollo-router/src/axum_factory/utils.rs | 4 +- apollo-router/src/configuration/mod.rs | 23 ++++ ...nfiguration__tests__schema_generation.snap | 34 ++++++ apollo-router/src/plugins/telemetry/apollo.rs | 7 ++ apollo-router/src/plugins/telemetry/config.rs | 30 +++++ apollo-router/src/plugins/telemetry/mod.rs | 112 +++++++++++++++++- .../src/plugins/telemetry/tracing/apollo.rs | 2 + .../telemetry/tracing/apollo_telemetry.rs | 32 +++-- apollo-router/src/router.rs | 16 ++- .../src/services/supergraph_service.rs | 13 -- apollo-router/src/testdata/jaeger.router.yaml | 3 + apollo-router/tests/common.rs | 28 ++--- apollo-router/tests/jaeger_test.rs | 7 +- apollo-router/tests/zipkin_test.rs | 3 +- docs/source/configuration/tracing.mdx | 22 +++- 16 files changed, 307 insertions(+), 49 deletions(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index c7624538af..c86d9ac0e8 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -71,6 +71,26 @@ There is now a separate subcommand for config related operations: ## 🚀 Features +### Add configuration for trace ID ([Issue #2080](https://github.com/apollographql/router/issues/2080)) + +If you want to expose in response headers the generated trace ID or the one you provided using propagation headers you can use this configuration: + +```yaml title="router.yaml" +telemetry: + tracing: + experimental_response_trace_id: + enabled: true # default: false + header_name: "my-trace-id" # default: "apollo-trace-id" + propagation: + # If you have your own way to generate a trace id and you want to pass it via a custom request header + request: + header_name: my-trace-id +``` + +Using this configuration you will have a response header called `my-trace-id` containing the trace ID. It could help you to debug a specific query if you want to grep your log with this trace id to have more context. + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2131 + ### Add configuration for logging and add more logs By default some logs containing sensible data (like request body, response body, headers) are not displayed even if we set the right log level. diff --git a/apollo-router/src/axum_factory/utils.rs b/apollo-router/src/axum_factory/utils.rs index 8ba5e6cdfb..d855f1afa8 100644 --- a/apollo-router/src/axum_factory/utils.rs +++ b/apollo-router/src/axum_factory/utils.rs @@ -238,7 +238,9 @@ impl MakeSpan for PropagatingMakeSpan { // If there was no span from the request then it will default to the NOOP span. // Attaching the NOOP span has the effect of preventing further tracing. - if context.span().span_context().is_valid() { + if context.span().span_context().is_valid() + || context.span().span_context().trace_id() != opentelemetry::trace::TraceId::INVALID + { // We have a valid remote span, attach it to the current thread before creating the root span. 
let _context_guard = context.attach(); tracing::span!( diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index 7819360d3b..94d00331ca 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -213,6 +213,29 @@ impl Configuration { self.apollo_plugins .plugins .insert("include_subgraph_errors".to_string(), json!({"all": true})); + // Enable experimental_response_trace_id + self.apollo_plugins + .plugins + .get_mut("telemetry") + .expect("telemetry plugin must be initialized at this point") + .as_object_mut() + .expect("configuration for telemetry must be an object") + .entry("tracing") + .and_modify(|e| { + e.as_object_mut() + .expect("configuration for telemetry.tracing must be an object") + .entry("experimental_response_trace_id") + .and_modify(|e| *e = json!({"enabled": true, "header_name": null})) + .or_insert_with(|| json!({"enabled": true, "header_name": null})); + }) + .or_insert_with(|| { + json!({ + "experimental_response_trace_id": { + "enabled": true, + "header_name": null + } + }) + }); self.supergraph.introspection = true; self.sandbox.enabled = true; self.homepage.enabled = false; diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 1d888d171d..2c8fd06bca 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -1802,6 +1802,25 @@ expression: "&schema" "additionalProperties": false, "nullable": true }, + "experimental_response_trace_id": { + "description": "A way to expose trace id in response headers", + "type": "object", + "required": [ + "enabled" + ], + "properties": { + "enabled": { + "description": "Expose the trace_id in response headers", + "type": "boolean" + }, + "header_name": { + "description": "Choose the header name to expose trace_id (default: apollo-trace-id)", + "type": "string", + "nullable": true + } + }, + "additionalProperties": false + }, "jaeger": { "type": "object", "oneOf": [ @@ -1951,6 +1970,21 @@ expression: "&schema" "type": "boolean", "nullable": true }, + "request": { + "description": "Select a custom request header to set your own trace_id (header value must be convertible from hexadecimal to set a correct trace_id)", + "type": "object", + "required": [ + "header_name" + ], + "properties": { + "header_name": { + "description": "Choose the header name to expose trace_id (default: apollo-trace-id)", + "type": "string" + } + }, + "additionalProperties": false, + "nullable": true + }, "trace_context": { "type": "boolean", "nullable": true diff --git a/apollo-router/src/plugins/telemetry/apollo.rs b/apollo-router/src/plugins/telemetry/apollo.rs index 25237d4acf..b38757fd91 100644 --- a/apollo-router/src/plugins/telemetry/apollo.rs +++ b/apollo-router/src/plugins/telemetry/apollo.rs @@ -12,6 +12,7 @@ use serde::Deserialize; use serde::Serialize; use url::Url; +use super::config::ExposeTraceId; use super::metrics::apollo::studio::ContextualizedStats; use super::metrics::apollo::studio::SingleStats; use super::metrics::apollo::studio::SingleStatsReport; @@ -79,6 +80,11 @@ pub(crate) struct Config { // The purpose is to allow is to pass this in to the plugin. 
#[schemars(skip)] pub(crate) schema_id: String, + + // Skipped because only useful at runtime, it's a copy of the configuration in tracing config + #[schemars(skip)] + #[serde(skip)] + pub(crate) expose_trace_id: ExposeTraceId, } fn apollo_key() -> Option { @@ -122,6 +128,7 @@ impl Default for Config { field_level_instrumentation_sampler: Some(SamplerOption::TraceIdRatioBased(0.01)), send_headers: ForwardHeaders::None, send_variable_values: ForwardValues::None, + expose_trace_id: ExposeTraceId::default(), } } } diff --git a/apollo-router/src/plugins/telemetry/config.rs b/apollo-router/src/plugins/telemetry/config.rs index 920895ce10..fb810649f3 100644 --- a/apollo-router/src/plugins/telemetry/config.rs +++ b/apollo-router/src/plugins/telemetry/config.rs @@ -2,6 +2,7 @@ use std::borrow::Cow; use std::collections::BTreeMap; +use axum::headers::HeaderName; use opentelemetry::sdk::Resource; use opentelemetry::Array; use opentelemetry::KeyValue; @@ -13,6 +14,8 @@ use serde::Deserialize; use super::metrics::MetricsAttributesConf; use super::*; use crate::configuration::ConfigurationError; +use crate::plugin::serde::deserialize_header_name; +use crate::plugin::serde::deserialize_option_header_name; use crate::plugin::serde::deserialize_regex; use crate::plugins::telemetry::metrics; @@ -82,6 +85,11 @@ pub(crate) struct MetricsCommon { #[derive(Clone, Default, Debug, Deserialize, JsonSchema)] #[serde(deny_unknown_fields, rename_all = "snake_case")] pub(crate) struct Tracing { + // TODO: when deleting the `experimental_` prefix, check the usage when enabling dev mode + // When deleting, put a #[serde(alias = "experimental_response_trace_id")] if we don't want to break things + /// A way to expose trace id in response headers + #[serde(default, rename = "experimental_response_trace_id")] + pub(crate) response_trace_id: ExposeTraceId, pub(crate) propagation: Option, pub(crate) trace_config: Option, pub(crate) otlp: Option, @@ -246,9 +254,22 @@ impl Default for LoggingFormat { } } +#[derive(Clone, Default, Debug, Deserialize, JsonSchema)] +#[serde(deny_unknown_fields, rename_all = "snake_case")] +pub(crate) struct ExposeTraceId { + /// Expose the trace_id in response headers + pub(crate) enabled: bool, + /// Choose the header name to expose trace_id (default: apollo-trace-id) + #[schemars(with = "Option")] + #[serde(deserialize_with = "deserialize_option_header_name")] + pub(crate) header_name: Option, +} + #[derive(Clone, Default, Debug, Deserialize, JsonSchema)] #[serde(deny_unknown_fields, rename_all = "snake_case")] pub(crate) struct Propagation { + /// Select a custom request header to set your own trace_id (header value must be convertible from hexadecimal to set a correct trace_id) + pub(crate) request: Option, pub(crate) baggage: Option, pub(crate) trace_context: Option, pub(crate) jaeger: Option, @@ -256,6 +277,15 @@ pub(crate) struct Propagation { pub(crate) zipkin: Option, } +#[derive(Clone, Debug, Deserialize, JsonSchema)] +#[serde(deny_unknown_fields, rename_all = "snake_case")] +pub(crate) struct PropagationRequestTraceId { + /// Choose the header name to expose trace_id (default: apollo-trace-id) + #[schemars(with = "String")] + #[serde(deserialize_with = "deserialize_header_name")] + pub(crate) header_name: HeaderName, +} + #[derive(Default, Debug, Clone, Deserialize, JsonSchema)] #[serde(deny_unknown_fields)] #[non_exhaustive] diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index cebd05f7b1..33295f8d1c 100644 --- 
a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -16,6 +16,7 @@ use ::tracing::info_span; use ::tracing::subscriber::set_global_default; use ::tracing::Span; use ::tracing::Subscriber; +use axum::headers::HeaderName; use futures::future::ready; use futures::future::BoxFuture; use futures::stream::once; @@ -27,13 +28,20 @@ use http::HeaderValue; use multimap::MultiMap; use once_cell::sync::OnceCell; use opentelemetry::global; +use opentelemetry::propagation::text_map_propagator::FieldIter; +use opentelemetry::propagation::Extractor; +use opentelemetry::propagation::Injector; use opentelemetry::propagation::TextMapPropagator; use opentelemetry::sdk::propagation::BaggagePropagator; use opentelemetry::sdk::propagation::TextMapCompositePropagator; use opentelemetry::sdk::propagation::TraceContextPropagator; use opentelemetry::sdk::trace::Builder; +use opentelemetry::trace::SpanContext; +use opentelemetry::trace::SpanId; use opentelemetry::trace::SpanKind; use opentelemetry::trace::TraceContextExt; +use opentelemetry::trace::TraceFlags; +use opentelemetry::trace::TraceState; use opentelemetry::trace::TracerProvider; use opentelemetry::KeyValue; use rand::Rng; @@ -90,6 +98,7 @@ use crate::spaceport::server::ReportSpaceport; use crate::spaceport::StatsContext; use crate::subgraph::Request; use crate::subgraph::Response; +use crate::tracer::TraceId; use crate::Context; use crate::ExecutionRequest; use crate::ListenAddr; @@ -117,6 +126,8 @@ pub(crate) const FTV1_DO_NOT_SAMPLE: &str = "apollo_telemetry::studio::ftv1_do_n pub(crate) const LOGGING_DISPLAY_HEADERS: &str = "apollo_telemetry::logging::display_headers"; pub(crate) const LOGGING_DISPLAY_BODY: &str = "apollo_telemetry::logging::display_body"; const DEFAULT_SERVICE_NAME: &str = "apollo-router"; +const GLOBAL_TRACER_NAME: &str = "apollo-router"; +const DEFAULT_EXPOSE_TRACE_ID_HEADER: &str = "apollo-trace-id"; static TELEMETRY_LOADED: OnceCell = OnceCell::new(); static TELEMETRY_REFCOUNT: AtomicU8 = AtomicU8::new(0); @@ -191,13 +202,15 @@ impl Plugin for Telemetry { let metrics_sender = self.apollo_metrics_sender.clone(); let metrics = BasicMetrics::new(&self.meter_provider); let config = Arc::new(self.config.clone()); + let config_map_res_first = config.clone(); let config_map_res = config.clone(); ServiceBuilder::new() .instrument(Self::supergraph_service_span( self.field_level_instrumentation_ratio, config.apollo.clone().unwrap_or_default(), )) - .map_response(|resp: SupergraphResponse| { + .map_response(move |mut resp: SupergraphResponse| { + let config = config_map_res_first.clone(); if let Ok(Some(usage_reporting)) = resp.context.get::<_, UsageReporting>(USAGE_REPORTING) { @@ -207,6 +220,22 @@ impl Plugin for Telemetry { &usage_reporting.stats_report_key.as_str(), ); } + // To expose trace_id or not + let expose_trace_id_header = config.tracing.as_ref().and_then(|t| { + t.response_trace_id.enabled.then(|| { + t.response_trace_id + .header_name + .clone() + .unwrap_or(HeaderName::from_static(DEFAULT_EXPOSE_TRACE_ID_HEADER)) + }) + }); + if let (Some(header_name), Some(trace_id)) = ( + expose_trace_id_header, + TraceId::maybe_new().and_then(|t| HeaderValue::from_str(&t.to_string()).ok()), + ) { + resp.response.headers_mut().append(header_name, trace_id); + } + if resp.context.contains_key(LOGGING_DISPLAY_HEADERS) { ::tracing::info!(http.response.headers = ?resp.response.headers(), "Supergraph response headers"); } @@ -228,6 +257,7 @@ impl Plugin for Telemetry { let metrics = metrics.clone(); 
let sender = metrics_sender.clone(); let start = Instant::now(); + async move { let mut result: Result = fut.await; result = Self::update_otel_metrics( @@ -375,6 +405,9 @@ impl Telemetry { .apollo .as_mut() .expect("telemetry apollo config must be present"); + if let Some(tracing_conf) = &config.tracing { + apollo.expose_trace_id = tracing_conf.response_trace_id.clone(); + } // If we have key and graph ref but no endpoint we start embedded spaceport let spaceport = match apollo { @@ -413,7 +446,7 @@ impl Telemetry { let tracer_provider = Self::create_tracer_provider(&config)?; let tracer = tracer_provider.versioned_tracer( - "apollo-router", + GLOBAL_TRACER_NAME, Some(env!("CARGO_PKG_VERSION")), None, ); @@ -575,6 +608,11 @@ impl Telemetry { if propagation.datadog.unwrap_or_default() || tracing.datadog.is_some() { propagators.push(Box::new(opentelemetry_datadog::DatadogPropagator::default())); } + if let Some(from_request_header) = &propagation.request.as_ref().map(|r| &r.header_name) { + propagators.push(Box::new(CustomTraceIdPropagator::new( + from_request_header.to_string(), + ))); + } TextMapCompositePropagator::new(propagators) } @@ -1301,6 +1339,76 @@ impl ApolloFtv1Handler { } } +/// CustomTraceIdPropagator to set custom trace_id for our tracing system +/// coming from headers +#[derive(Debug)] +struct CustomTraceIdPropagator { + header_name: String, + fields: [String; 1], +} + +impl CustomTraceIdPropagator { + fn new(header_name: String) -> Self { + Self { + fields: [header_name.clone()], + header_name, + } + } + + fn extract_span_context(&self, extractor: &dyn Extractor) -> Option { + let trace_id = extractor.get(&self.header_name)?; + + opentelemetry::global::tracer_provider().versioned_tracer( + GLOBAL_TRACER_NAME, + Some(env!("CARGO_PKG_VERSION")), + None, + ); + // extract trace id + let trace_id = match opentelemetry::trace::TraceId::from_hex(trace_id) { + Ok(trace_id) => trace_id, + Err(err) => { + ::tracing::error!("cannot generate custom trace_id: {err}"); + return None; + } + }; + + SpanContext::new( + trace_id, + SpanId::INVALID, + TraceFlags::default().with_sampled(true), + true, + TraceState::default(), + ) + .into() + } +} + +impl TextMapPropagator for CustomTraceIdPropagator { + fn inject_context(&self, cx: &opentelemetry::Context, injector: &mut dyn Injector) { + let span = cx.span(); + let span_context = span.span_context(); + if span_context.is_valid() { + let header_value = format!("{}", span_context.trace_id()); + injector.set(&self.header_name, header_value); + } + } + + fn extract_with_context( + &self, + cx: &opentelemetry::Context, + extractor: &dyn Extractor, + ) -> opentelemetry::Context { + cx.with_remote_span_context( + self.extract_span_context(extractor) + .unwrap_or_else(SpanContext::empty_context), + ) + } + + fn fields(&self) -> FieldIter<'_> { + FieldIter::new(self.fields.as_ref()) + } +} + // // Please ensure that any tests added to the tests module use the tokio multi-threaded test executor. // diff --git a/apollo-router/src/plugins/telemetry/tracing/apollo.rs b/apollo-router/src/plugins/telemetry/tracing/apollo.rs index 788999d692..969c1a2247 100644 --- a/apollo-router/src/plugins/telemetry/tracing/apollo.rs +++ b/apollo-router/src/plugins/telemetry/tracing/apollo.rs @@ -21,11 +21,13 @@ impl TracingConfigurator for Config { schema_id, buffer_size, field_level_instrumentation_sampler, + expose_trace_id, .. 
} => { tracing::debug!("configuring exporter to Studio"); let exporter = apollo_telemetry::Exporter::builder() + .expose_trace_id_config(expose_trace_id.clone()) .trace_config(trace_config.clone()) .endpoint(endpoint.clone()) .apollo_key(key) diff --git a/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs b/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs index 39affe53de..c8cddba186 100644 --- a/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs +++ b/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs @@ -57,6 +57,7 @@ const FTV1_DO_NOT_SAMPLE_REASON: Key = Key::from_static_str("ftv1.do_not_sample_ const SUBGRAPH_NAME: Key = Key::from_static_str("apollo.subgraph.name"); const CLIENT_NAME: Key = Key::from_static_str("client.name"); const CLIENT_VERSION: Key = Key::from_static_str("client.version"); +pub(crate) const DEFAULT_TRACE_ID_HEADER_NAME: &str = "apollo-trace-id"; #[derive(Error, Debug)] pub(crate) enum Error { @@ -86,6 +87,7 @@ pub(crate) enum Error { #[derive(Derivative)] #[derivative(Debug)] pub(crate) struct Exporter { + expose_trace_id_config: config::ExposeTraceId, trace_config: config::Trace, spans_by_parent_id: LruCache>, endpoint: Url, @@ -111,6 +113,7 @@ enum TreeData { impl Exporter { #[builder] pub(crate) fn new( + expose_trace_id_config: config::ExposeTraceId, trace_config: config::Trace, endpoint: Url, apollo_key: String, @@ -123,6 +126,7 @@ impl Exporter { let apollo_exporter = ApolloExporter::new(&endpoint, &apollo_key, &apollo_graph_ref, &schema_id)?; Ok(Self { + expose_trace_id_config, spans_by_parent_id: LruCache::new(buffer_size), trace_config, endpoint, @@ -427,18 +431,28 @@ impl Exporter { .map(|(header_name, value)| (header_name.to_lowercase(), Values { value })) .collect(); // For now, only trace_id - // let mut response_headers = HashMap::with_capacity(1); - // FIXME: uncomment later - // response_headers.insert( - // String::from("apollo_trace_id"), - // Values { - // value: vec![span.span_context.trace_id().to_string()], - // }, - // ); + let response_headers = if self.expose_trace_id_config.enabled { + let mut res = HashMap::with_capacity(1); + res.insert( + self.expose_trace_id_config + .header_name + .as_ref() + .map(|h| h.to_string()) + .unwrap_or_else(|| DEFAULT_TRACE_ID_HEADER_NAME.to_string()), + Values { + value: vec![span.span_context.trace_id().to_string()], + }, + ); + + res + } else { + HashMap::new() + }; + Http { method: method.into(), request_headers, - response_headers: HashMap::new(), + response_headers, status_code: 0, } } diff --git a/apollo-router/src/router.rs b/apollo-router/src/router.rs index b671274241..88d583f5c4 100644 --- a/apollo-router/src/router.rs +++ b/apollo-router/src/router.rs @@ -743,10 +743,18 @@ mod tests { #[tokio::test(flavor = "multi_thread")] async fn config_dev_mode_without_file() { - let mut stream = - ConfigurationSource::from(Configuration::builder().dev(true).build().unwrap()) - .into_stream() - .boxed(); + let telemetry_configuration = serde_json::json!({ + "telemetry": {} + }); + let mut stream = ConfigurationSource::from( + Configuration::builder() + .apollo_plugin("telemetry", telemetry_configuration) + .dev(true) + .build() + .unwrap(), + ) + .into_stream() + .boxed(); let cfg = match stream.next().await.unwrap() { UpdateConfiguration(configuration) => configuration, diff --git a/apollo-router/src/services/supergraph_service.rs b/apollo-router/src/services/supergraph_service.rs index f02fbf335c..c409359174 100644 --- 
a/apollo-router/src/services/supergraph_service.rs +++ b/apollo-router/src/services/supergraph_service.rs @@ -120,19 +120,6 @@ where .build() .expect("building a response like this should not fail")) }); - // FIXME: Enable it later - // .and_then(|mut res| async move { - // if let Some(trace_id) = TraceId::maybe_new().map(|t| t.to_string()) { - // let header_value = HeaderValue::from_str(trace_id.as_str()); - // if let Ok(header_value) = header_value { - // res.response - // .headers_mut() - // .insert(HeaderName::from_static("apollo_trace_id"), header_value); - // } - // } - - // Ok(res) - // }); Box::pin(fut) } diff --git a/apollo-router/src/testdata/jaeger.router.yaml b/apollo-router/src/testdata/jaeger.router.yaml index 3741c66d64..fcc9a7b0e6 100644 --- a/apollo-router/src/testdata/jaeger.router.yaml +++ b/apollo-router/src/testdata/jaeger.router.yaml @@ -1,5 +1,8 @@ telemetry: tracing: + experimental_response_trace_id: + enabled: true + header_name: apollo-custom-trace-id propagation: jaeger: true trace_config: diff --git a/apollo-router/tests/common.rs b/apollo-router/tests/common.rs index 0594e3bf90..b67b44d5b4 100644 --- a/apollo-router/tests/common.rs +++ b/apollo-router/tests/common.rs @@ -9,14 +9,11 @@ use std::time::Duration; use http::header::ACCEPT; use http::header::CONTENT_TYPE; -use http::Method; -use http::Request; -use http::Uri; use jsonpath_lib::Selector; use opentelemetry::global; use opentelemetry::propagation::TextMapPropagator; use opentelemetry::sdk::trace::Tracer; -use opentelemetry_http::HttpClient; +use serde_json::json; use serde_json::Value; use tower::BoxError; use tracing::info_span; @@ -65,8 +62,8 @@ impl TracingTest { Self { test_config_location: test_config_location.clone(), router: Command::new(router_location) - .stdout(Stdio::null()) - .stderr(Stdio::null()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()) .args([ "--hr", "--config", @@ -89,20 +86,20 @@ impl TracingTest { Ok(()) } - pub async fn run_query(&self) -> String { + pub async fn run_query(&self) -> (String, reqwest::Response) { let client = reqwest::Client::new(); let id = Uuid::new_v4().to_string(); let span = info_span!("client_request", unit_test = id.as_str()); let _span_guard = span.enter(); for _i in 0..100 { - let mut request = Request::builder() - .method(Method::POST) + let mut request = client + .post("http://localhost:4000") .header(CONTENT_TYPE, "application/json") .header("apollographql-client-name", "custom_name") .header("apollographql-client-version", "1.0") - .uri(Uri::from_static("http://localhost:4000")) - .body(r#"{"query":"{topProducts{name}}","variables":{}}"#.into()) + .json(&json!({"query":"{topProducts{name}}","variables":{}})) + .build() .unwrap(); global::get_text_map_propagator(|propagator| { @@ -112,13 +109,10 @@ impl TracingTest { ); }); request.headers_mut().remove(ACCEPT); - match client.send(request).await { + match client.execute(request).await { Ok(result) => { - tracing::debug!( - "got {}", - String::from_utf8(result.body().to_vec()).unwrap_or_default() - ); - return id; + tracing::debug!("got {result:?}"); + return (id, result); } Err(e) => { eprintln!("query failed: {}", e); diff --git a/apollo-router/tests/jaeger_test.rs b/apollo-router/tests/jaeger_test.rs index 3caac2ef6f..e352a829ad 100644 --- a/apollo-router/tests/jaeger_test.rs +++ b/apollo-router/tests/jaeger_test.rs @@ -42,7 +42,12 @@ async fn test_jaeger_tracing() -> Result<(), BoxError> { tokio::task::spawn(subgraph()); for _ in 0..10 { - let id = router.run_query().await; + let (id, 
result) = router.run_query().await; + assert!(!result .headers() .get("apollo-custom-trace-id") .unwrap() .is_empty()); query_jaeger_for_trace(id).await?; router.touch_config()?; tokio::time::sleep(Duration::from_millis(100)).await; diff --git a/apollo-router/tests/zipkin_test.rs b/apollo-router/tests/zipkin_test.rs index c6387c61b7..9b7557c714 100644 --- a/apollo-router/tests/zipkin_test.rs +++ b/apollo-router/tests/zipkin_test.rs @@ -16,7 +16,8 @@ async fn test_tracing() -> Result<(), BoxError> { opentelemetry_zipkin::Propagator::new(), Path::new("zipkin.router.yaml"), ); - router.run_query().await; + let (_, response) = router.run_query().await; + assert!(response.headers().get("apollo-trace-id").is_none()); Ok(()) } diff --git a/docs/source/configuration/tracing.mdx b/docs/source/configuration/tracing.mdx index c7641baef5..8a3a7972fc 100644 --- a/docs/source/configuration/tracing.mdx +++ b/docs/source/configuration/tracing.mdx @@ -81,9 +81,30 @@ telemetry: # https://zipkin.io/ (compliant with opentracing) zipkin: false + # If you have your own way to generate a trace id and you want to pass it via a custom request header + request: + header_name: my-trace-id ``` + Specifying explicit propagation is generally only required if you're using an exporter that supports multiple trace ID formats (e.g., OpenTelemetry Collector, Jaeger, or OpenTracing compatible exporters). +### Trace ID + +> This is part of an experimental feature, which means that until it is stabilized (and the `experimental_` prefix is removed) we might change the configuration shape or add/remove features. +> If you want to give feedback or participate in shaping that feature, feel free to join [this discussion on GitHub](https://github.com/apollographql/router/discussions/2147). + +If you want to expose the generated trace ID (or the one you provided via propagation headers) in response headers, you can use this configuration: + +```yaml title="router.yaml" +telemetry: + tracing: + experimental_response_trace_id: + enabled: true # default: false + header_name: "my-trace-id" # default: "apollo-trace-id" +``` + +With this configuration, responses include a header called `my-trace-id` containing the trace ID. This can help you debug a specific query: grep your logs for that trace ID to get more context. + ## Using Datadog The Apollo Router can be configured to connect to either the default agent address or a URL.
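The `jaeger_test` change above asserts that the `apollo-custom-trace-id` response header is present and non-empty. Outside the test suite, a quick manual check of the trace ID header described in the tracing documentation could look like the following sketch. It assumes a router running locally at `http://localhost:4000` with `experimental_response_trace_id` enabled under the default `apollo-trace-id` header name, and uses `reqwest` (with its `json` feature), `tokio`, and `serde_json` on the client side; adjust the header name if you configured a custom one.

```rust
// Hypothetical client-side check for the trace ID response header.
// Assumes a locally running router with `experimental_response_trace_id` enabled.
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let response = reqwest::Client::new()
        .post("http://localhost:4000")
        .json(&json!({ "query": "{ topProducts { name } }", "variables": {} }))
        .send()
        .await?;

    // The router is expected to echo the generated (or propagated) trace ID here.
    match response.headers().get("apollo-trace-id") {
        Some(trace_id) => println!("trace id: {}", trace_id.to_str().unwrap_or("<invalid>")),
        None => println!("no trace id header in the response"),
    }
    Ok(())
}
```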
@@ -109,7 +130,6 @@ telemetry: agent: # Either 'default' or a URL endpoint: docker_jaeger:14268 - ``` ### Collector config From 4d59d906ad33b2a7645015560bade9e9d3cc08fd Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Thu, 24 Nov 2022 17:00:13 +0000 Subject: [PATCH 25/45] fix(deps): update rust crate flate2 to 1.0.25 --- Cargo.lock | 17 +++++++++++++---- apollo-router/Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fea151b4d7..dc6f894466 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -520,7 +520,7 @@ dependencies = [ "cc", "cfg-if", "libc", - "miniz_oxide", + "miniz_oxide 0.5.4", "object", "rustc-demangle", ] @@ -1598,12 +1598,12 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" +checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" dependencies = [ "crc32fast", - "miniz_oxide", + "miniz_oxide 0.6.2", ] [[package]] @@ -2781,6 +2781,15 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +dependencies = [ + "adler", +] + [[package]] name = "mintex" version = "0.1.2" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index cd367acb24..671032a8bf 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -73,7 +73,7 @@ dhat = { version = "0.3.2", optional = true } diff = "0.1.13" directories = "4.0.1" displaydoc = "0.2" -flate2 = "1.0.24" +flate2 = "1.0.25" futures = { version = "0.3.25", features = ["thread-pool"] } graphql_client = "0.11.0" hex = "0.4.3" From cfb421a5646de4ae5d5634415c86336d70c6fb90 Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Mon, 28 Nov 2022 09:56:31 +0000 Subject: [PATCH 26/45] Fixes #2123 (#2162) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Issue was introduced with #2116 but no release had this in. Move operations would insert data in the config due to the delete magic value always getting added. Now we check before adding such values. We may need to move to fluvio-jolt longer term. Co-authored-by: bryn --- NEXT_CHANGELOG.md | 2 +- ...pgrade__test__move_non_existent_field.snap | 7 +++ apollo-router/src/configuration/upgrade.rs | 54 ++++++++++++++----- 3 files changed, 50 insertions(+), 13 deletions(-) create mode 100644 apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__move_non_existent_field.snap diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index c86d9ac0e8..1508b75baa 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -186,7 +186,7 @@ From the CLI users can run: There are situations where comments and whitespace are not preserved. This may be improved in future. 
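The fix described above guards each migration action with a JSONPath existence check, so a `Move`, `Copy`, or `Delete` is only applied when the source path actually exists in the configuration. The standalone sketch below reproduces just that guard with `jsonpath_lib` and `serde_json`; the `obj.field1` path and the surrounding `main` function are made up for illustration.

```rust
// Sketch of the existence check used to decide whether a migration action applies.
use serde_json::json;

fn main() {
    // A config that does not contain the source path of the migration.
    let config = json!({ "should": "stay" });
    let from = "obj.field1";
    let path = format!("$.{}", from);

    // Same guard as in `upgrade.rs`: select the source path, skip the action if nothing matches.
    let matches = jsonpath_lib::select(&config, &path).unwrap_or_default();
    if matches.is_empty() {
        println!("`{}` is absent: the move action is skipped", from);
    } else {
        println!("`{}` is present: apply the move and mark the source for removal", from);
    }
}
```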
-By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2116 +By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2116, https://github.com/apollographql/router/pull/2162 ### *Experimental* subgraph request retry ([Issue #338](https://github.com/apollographql/router/issues/338), [Issue #1956](https://github.com/apollographql/router/issues/1956)) diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__move_non_existent_field.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__move_non_existent_field.snap new file mode 100644 index 0000000000..58f3973636 --- /dev/null +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__upgrade__test__move_non_existent_field.snap @@ -0,0 +1,7 @@ +--- +source: apollo-router/src/configuration/upgrade.rs +expression: "apply_migration(&json!({ \"should\" : \"stay\" }),\n &Migration::builder().action(Action::Move {\n from: \"obj.field1\".to_string(),\n to: \"new.obj.field1\".to_string(),\n }).description(\"move field1\").build()).expect(\"expected successful migration\")" +--- +{ + "should": "stay" +} diff --git a/apollo-router/src/configuration/upgrade.rs b/apollo-router/src/configuration/upgrade.rs index 9ed15c5ea8..43c8ff40ab 100644 --- a/apollo-router/src/configuration/upgrade.rs +++ b/apollo-router/src/configuration/upgrade.rs @@ -70,22 +70,37 @@ fn apply_migration(config: &Value, migration: &Migration) -> Result { - // Deleting isn't actually supported by protus so we add a magic value to delete later - transformer_builder = transformer_builder.add_action( - Parser::parse(REMOVAL_EXPRESSION, path).expect("migration must be valid"), - ); + if !jsonpath_lib::select(config, &format!("$.{}", path)) + .unwrap_or_default() + .is_empty() + { + // Deleting isn't actually supported by protus so we add a magic value to delete later + transformer_builder = transformer_builder.add_action( + Parser::parse(REMOVAL_EXPRESSION, path).expect("migration must be valid"), + ); + } } Action::Copy { from, to } => { - transformer_builder = transformer_builder - .add_action(Parser::parse(from, to).expect("migration must be valid")); + if !jsonpath_lib::select(config, &format!("$.{}", from)) + .unwrap_or_default() + .is_empty() + { + transformer_builder = transformer_builder + .add_action(Parser::parse(from, to).expect("migration must be valid")); + } } Action::Move { from, to } => { - transformer_builder = transformer_builder - .add_action(Parser::parse(from, to).expect("migration must be valid")); - // Deleting isn't actually supported by protus so we add a magic value to delete later - transformer_builder = transformer_builder.add_action( - Parser::parse(REMOVAL_EXPRESSION, from).expect("migration must be valid"), - ); + if !jsonpath_lib::select(config, &format!("$.{}", from)) + .unwrap_or_default() + .is_empty() + { + transformer_builder = transformer_builder + .add_action(Parser::parse(from, to).expect("migration must be valid")); + // Deleting isn't actually supported by protus so we add a magic value to delete later + transformer_builder = transformer_builder.add_action( + Parser::parse(REMOVAL_EXPRESSION, from).expect("migration must be valid"), + ); + } } } } @@ -258,6 +273,21 @@ mod test { .expect("expected successful migration")); } + #[test] + fn move_non_existent_field() { + insta::assert_json_snapshot!(apply_migration( + &json!({"should": "stay"}), + &Migration::builder() + 
.action(Action::Move { + from: "obj.field1".to_string(), + to: "new.obj.field1".to_string() + }) + .description("move field1") + .build(), + ) + .expect("expected successful migration")); + } + #[test] fn move_array_element() { insta::assert_json_snapshot!(apply_migration( From a4fd9fc4163029da3338ab6635e6a0c58009990f Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Mon, 28 Nov 2022 12:29:39 +0000 Subject: [PATCH 27/45] recent changes to add support for local repos broke the script (#2163) The addition of the `-r` flag broke the existing logic for processing the `-b` flag with non `-r` repos. This fixes the logic and prints additional information about which repo is being used to build the image. --- NEXT_CHANGELOG.md | 8 +++++++- dockerfiles/diy/build_docker_image.sh | 8 ++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 1508b75baa..478d707e7e 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -215,6 +215,12 @@ By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/p ## 🐛 Fixes +### fix build_docker_image.sh script when using default repo ([PR #2163](https://github.com/apollographql/router/pull/2163)) + +Adding the `-r` flag recently broke the existing functionality to build from the default repo using `-b`. This fixes that. + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2163 + ### Improve errors when subgraph returns non-GraphQL response with a non-2xx status code ([Issue #2117](https://github.com/apollographql/router/issues/2117)) The error response will now contain the status code and status name. Example: `HTTP fetch failed from 'my-service': 401 Unauthorized` @@ -247,7 +253,7 @@ The docs CORS regex example now displays a working and safe way to allow `HTTPS` By [@col](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/2152 -### update documentation to reflect new examples structure ([Issue #2095](https://github.com/apollographql/router/pull/2133)) +### update documentation to reflect new examples structure ([Issue #2095](https://github.com/apollographql/router/issues/2095)) We recently updated the examples directory structure. This fixes the documentation links to the examples. It also makes clear that rhai subgraph fields are read-only, since they are shared resources. diff --git a/dockerfiles/diy/build_docker_image.sh b/dockerfiles/diy/build_docker_image.sh index 41458ae52c..6681141429 100755 --- a/dockerfiles/diy/build_docker_image.sh +++ b/dockerfiles/diy/build_docker_image.sh @@ -118,11 +118,11 @@ else if [ "${BUILD_IMAGE}" = false ]; then usage fi - if [ -z "${GIT_REPO}" ]; then - GIT_REPO="${DEFAULT_REPO}" - fi fi +if [ -z "${GIT_REPO}" ]; then + GIT_REPO="${DEFAULT_REPO}" +fi # We need a place to build if ! 
BUILD_DIR=$(mktemp -d -t "router-build.XXXXXXXXXX"); then @@ -150,7 +150,7 @@ if [ "${BUILD_IMAGE}" = true ]; then ROUTER_VERSION=$(git rev-parse HEAD) fi # Let the user know what we are going to do - echo "Building image: ${ROUTER_VERSION}" from repo"" + echo "Building image: ${ROUTER_VERSION}" from repo: ${GIT_REPO}"" git checkout "${ROUTER_VERSION}" > /dev/null 2>&1 || terminate "Couldn't checkout ${ROUTER_VERSION}" # Build our docker images docker build -q -t "router:${ROUTER_VERSION}" \ From 5792c1f9751111d28250275ecce1fb56ea8d28fa Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Mon, 28 Nov 2022 13:46:33 +0100 Subject: [PATCH 28/45] request retries are deactivated by default on mutations (#2160) Fix #2154 Follow up on #2006 --- NEXT_CHANGELOG.md | 5 ++++- ..._configuration__tests__schema_generation.snap | 10 ++++++++++ apollo-router/src/plugins/traffic_shaping/mod.rs | 12 ++++++++++-- .../src/plugins/traffic_shaping/retry.rs | 16 +++++++++++++--- docs/source/configuration/traffic-shaping.mdx | 1 + 5 files changed, 38 insertions(+), 6 deletions(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 478d707e7e..e33c85b57e 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -196,6 +196,8 @@ bootstrap the system or for low traffic deployments - for each successful request, we add a "token" to the bucket, those tokens expire after `ttl` (default: 10 seconds) - the number of available additional retries is a part of the number of tokens, defined by `retry_percent` (default is 0.2) +Request retries are disabled by default on mutations. + This is activated in the `traffic_shaping` plugin, either globally or per subgraph: ```yaml @@ -205,13 +207,14 @@ traffic_shaping: min_per_sec: 10 ttl: 10s retry_percent: 0.2 + retry_mutations: false subgraphs: accounts: experimental_retry: min_per_sec: 20 ``` -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2006 +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2006 and https://github.com/apollographql/router/pull/2160 ## 🐛 Fixes diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 2c8fd06bca..9bb02fa171 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -2224,6 +2224,11 @@ expression: "&schema" "minimum": 0.0, "nullable": true }, + "retry_mutations": { + "description": "allows request retries on mutations. This should only be activated if mutations are idempotent. Disabled by default", + "type": "boolean", + "nullable": true + }, "retry_percent": { "description": "percentage of calls to deposit that can be retried. This is in addition to any retries allowed for via min_per_sec. Must be between 0 and 1000, default value is 0.2", "type": "number", @@ -2358,6 +2363,11 @@ expression: "&schema" "minimum": 0.0, "nullable": true }, + "retry_mutations": { + "description": "allows request retries on mutations. This should only be activated if mutations are idempotent. Disabled by default", + "type": "boolean", + "nullable": true + }, "retry_percent": { "description": "percentage of calls to deposit that can be retried. This is in addition to any retries allowed for via min_per_sec. 
Must be between 0 and 1000, default value is 0.2", "type": "number", diff --git a/apollo-router/src/plugins/traffic_shaping/mod.rs b/apollo-router/src/plugins/traffic_shaping/mod.rs index be952aaaf1..bd0d277355 100644 --- a/apollo-router/src/plugins/traffic_shaping/mod.rs +++ b/apollo-router/src/plugins/traffic_shaping/mod.rs @@ -110,6 +110,9 @@ struct RetryConfig { /// retries allowed for via min_per_sec. Must be between 0 and 1000, default value /// is 0.2 retry_percent: Option, + /// allows request retries on mutations. This should only be activated if mutations + /// are idempotent. Disabled by default + retry_mutations: Option, } impl Merge for RetryConfig { @@ -120,6 +123,7 @@ impl Merge for RetryConfig { ttl: self.ttl.or(fallback.ttl), min_per_sec: self.min_per_sec.or(fallback.min_per_sec), retry_percent: self.retry_percent.or(fallback.retry_percent), + retry_mutations: self.retry_mutations.or(fallback.retry_mutations), }, } } @@ -318,8 +322,12 @@ impl TrafficShaping { }); let retry = config.experimental_retry.as_ref().map(|config| { - let retry_policy = - RetryPolicy::new(config.ttl, config.min_per_sec, config.retry_percent); + let retry_policy = RetryPolicy::new( + config.ttl, + config.min_per_sec, + config.retry_percent, + config.retry_mutations, + ); tower::retry::RetryLayer::new(retry_policy) }); diff --git a/apollo-router/src/plugins/traffic_shaping/retry.rs b/apollo-router/src/plugins/traffic_shaping/retry.rs index 6138581c21..7a130502fb 100644 --- a/apollo-router/src/plugins/traffic_shaping/retry.rs +++ b/apollo-router/src/plugins/traffic_shaping/retry.rs @@ -5,9 +5,13 @@ use std::time::Duration; use tower::retry::budget::Budget; use tower::retry::Policy; +use crate::query_planner::OperationKind; +use crate::services::subgraph; + #[derive(Clone, Default)] pub(crate) struct RetryPolicy { budget: Arc, + retry_mutations: bool, } impl RetryPolicy { @@ -15,6 +19,7 @@ impl RetryPolicy { duration: Option, min_per_sec: Option, retry_percent: Option, + retry_mutations: Option, ) -> Self { Self { budget: Arc::new(Budget::new( @@ -22,14 +27,15 @@ impl RetryPolicy { min_per_sec.unwrap_or(10), retry_percent.unwrap_or(0.2), )), + retry_mutations: retry_mutations.unwrap_or(false), } } } -impl Policy for RetryPolicy { +impl Policy for RetryPolicy { type Future = future::Ready; - fn retry(&self, _req: &Req, result: Result<&Res, &E>) -> Option { + fn retry(&self, req: &subgraph::Request, result: Result<&Res, &E>) -> Option { match result { Ok(_) => { // Treat all `Response`s as success, @@ -38,6 +44,10 @@ impl Policy for RetryPolicy { None } Err(_e) => { + if req.operation_kind == OperationKind::Mutation && !self.retry_mutations { + return None; + } + let withdrew = self.budget.withdraw(); if withdrew.is_err() { return None; @@ -48,7 +58,7 @@ impl Policy for RetryPolicy { } } - fn clone_request(&self, req: &Req) -> Option { + fn clone_request(&self, req: &subgraph::Request) -> Option { Some(req.clone()) } } diff --git a/docs/source/configuration/traffic-shaping.mdx b/docs/source/configuration/traffic-shaping.mdx index c61f48e698..7f8009d1f0 100644 --- a/docs/source/configuration/traffic-shaping.mdx +++ b/docs/source/configuration/traffic-shaping.mdx @@ -43,6 +43,7 @@ traffic_shaping: min_per_sec: 10 # minimal number of retries per second (`min_per_sec`, default is 10 retries per second) ttl: 10s # for each successful request, we register a token, that expires according to this option (default: 10s) retry_percent: 0.2 # defines the proportion of available retries to the current number of 
tokens + retry_mutations: false # allows retries on mutations. This should only be enabled if mutations are idempotent ``` From ad6e9318cbcb10cddbb81b6c080c3dfaa6c1e54d Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Mon, 28 Nov 2022 14:38:39 +0100 Subject: [PATCH 29/45] in memory and Redis cache configuration (#2155) Fix #2075 This implements @BrynCooke's suggestion from #2075. I did not add the option for introspection because it did not feel very useful, and would be a breaking change for the configuration format. If we really need to cache introspection responses, that could be done as part of the whole response caching feature. Example configuration: ```yaml supergraph: apq: experimental_cache: in_memory: limit: 512 redis: urls: ["redis://..."] query_planning: experimental_cache: in_memory: limit: 512 redis: urls: ["redis://..."] ``` --- NEXT_CHANGELOG.md | 24 +++- .../axum_factory/axum_http_server_factory.rs | 7 +- apollo-router/src/cache/mod.rs | 11 ++ apollo-router/src/configuration/mod.rs | 110 +++++++++--------- ...nfiguration__tests__schema_generation.snap | 100 +++++++++++++++- apollo-router/src/introspection.rs | 18 +-- .../query_planner/caching_query_planner.rs | 20 +++- apollo-router/src/router.rs | 3 +- .../src/services/supergraph_service.rs | 11 +- docs/source/configuration/caching.mdx | 42 +++++++ docs/source/configuration/overview.mdx | 2 +- 11 files changed, 261 insertions(+), 87 deletions(-) create mode 100644 docs/source/configuration/caching.mdx diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index e33c85b57e..fff921e376 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -188,7 +188,7 @@ There are situations where comments and whitespace are not preserved. This may b By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2116, https://github.com/apollographql/router/pull/2162 -### *Experimental* subgraph request retry ([Issue #338](https://github.com/apollographql/router/issues/338), [Issue #1956](https://github.com/apollographql/router/issues/1956)) +### *Experimental* 🥼 subgraph request retry ([Issue #338](https://github.com/apollographql/router/issues/338), [Issue #1956](https://github.com/apollographql/router/issues/1956)) Implements subgraph request retries, using Finagle's retry buckets algorithm: - it defines a minimal number of retries per second (`min_per_sec`, default is 10 retries per second), to @@ -216,6 +216,28 @@ traffic_shaping: By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2006 and https://github.com/apollographql/router/pull/2160 +### *Experimental* 🥼 Caching configuration ([Issue #2075](https://github.com/apollographql/router/issues/2075)) + +Split Redis cache configuration for APQ and query planning: + +```yaml +supergraph: + apq: + experimental_cache: + in_memory: + limit: 512 + redis: + urls: ["redis://..."] + query_planning: + experimental_cache: + in_memory: + limit: 512 + redis: + urls: ["redis://..."] +``` + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2155 + ## 🐛 Fixes ### fix build_docker_image.sh script when using default repo ([PR #2163](https://github.com/apollographql/router/pull/2163)) diff --git a/apollo-router/src/axum_factory/axum_http_server_factory.rs b/apollo-router/src/axum_factory/axum_http_server_factory.rs index ad640703df..5fcf3a3758 100644 --- a/apollo-router/src/axum_factory/axum_http_server_factory.rs +++ b/apollo-router/src/axum_factory/axum_http_server_factory.rs @@ -163,7 +163,12 @@ impl 
HttpServerFactory for AxumHttpServerFactory { RF: SupergraphServiceFactory, { Box::pin(async move { - let apq = APQLayer::with_cache(DeduplicatingCache::new().await); + let apq = APQLayer::with_cache( + DeduplicatingCache::from_configuration( + &configuration.supergraph.apq.experimental_cache, + ) + .await, + ); let all_routers = make_axum_router(service_factory, &configuration, extra_endpoints, apq)?; diff --git a/apollo-router/src/cache/mod.rs b/apollo-router/src/cache/mod.rs index abca52b917..e98a664ed6 100644 --- a/apollo-router/src/cache/mod.rs +++ b/apollo-router/src/cache/mod.rs @@ -37,6 +37,17 @@ where } } + pub(crate) async fn from_configuration(config: &crate::configuration::Cache) -> Self { + Self::with_capacity( + config.in_memory.limit, + #[cfg(feature = "experimental_cache")] + config.redis.as_ref().map(|c| c.urls.clone()), + #[cfg(not(feature = "experimental_cache"))] + None, + ) + .await + } + pub(crate) async fn get(&self, key: &K) -> Entry { // waiting on a value from the cache is a potentially long(millisecond scale) task that // can involve a network call to an external database. To reduce the waiting time, we diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index 94d00331ca..30ed5dab67 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -34,6 +34,7 @@ use serde_json::Map; use serde_json::Value; use thiserror::Error; +use crate::cache::DEFAULT_CACHE_CAPACITY; use crate::configuration::schema::Mode; use crate::executable::APOLLO_ROUTER_DEV_ENV; use crate::plugin::plugins; @@ -485,16 +486,19 @@ pub(crate) struct Supergraph { #[serde(default = "default_defer_support")] pub(crate) preview_defer_support: bool, - #[cfg(feature = "experimental_cache")] - /// URLs of Redis cache used for query planning - pub(crate) cache_redis_urls: Option>, + /// Configures automatic persisted queries + #[serde(default)] + pub(crate) apq: Apq, + + /// Query planning options + #[serde(default)] + pub(crate) query_planning: QueryPlanning, } fn default_defer_support() -> bool { true } -#[cfg(feature = "experimental_cache")] #[buildstructor::buildstructor] impl Supergraph { #[builder] @@ -503,19 +507,20 @@ impl Supergraph { path: Option, introspection: Option, preview_defer_support: Option, - cache_redis_urls: Option>, + apq: Option, + query_planning: Option, ) -> Self { Self { listen: listen.unwrap_or_else(default_graphql_listen), path: path.unwrap_or_else(default_graphql_path), introspection: introspection.unwrap_or_else(default_graphql_introspection), preview_defer_support: preview_defer_support.unwrap_or_else(default_defer_support), - cache_redis_urls, + apq: apq.unwrap_or_default(), + query_planning: query_planning.unwrap_or_default(), } } } -#[cfg(feature = "experimental_cache")] #[cfg(test)] #[buildstructor::buildstructor] impl Supergraph { @@ -525,75 +530,74 @@ impl Supergraph { path: Option, introspection: Option, preview_defer_support: Option, - cache_redis_urls: Option>, + apq: Option, + query_planning: Option, ) -> Self { Self { listen: listen.unwrap_or_else(test_listen), path: path.unwrap_or_else(default_graphql_path), introspection: introspection.unwrap_or_else(default_graphql_introspection), preview_defer_support: preview_defer_support.unwrap_or_else(default_defer_support), - cache_redis_urls, + apq: apq.unwrap_or_default(), + query_planning: query_planning.unwrap_or_default(), } } } -#[cfg(not(feature = "experimental_cache"))] -#[buildstructor::buildstructor] -impl Supergraph { - #[builder] 
- pub(crate) fn new( - listen: Option, - path: Option, - introspection: Option, - preview_defer_support: Option, - ) -> Self { - Self { - listen: listen.unwrap_or_else(default_graphql_listen), - path: path.unwrap_or_else(default_graphql_path), - introspection: introspection.unwrap_or_else(default_graphql_introspection), - preview_defer_support: preview_defer_support.unwrap_or_else(default_defer_support), - } +impl Default for Supergraph { + fn default() -> Self { + Self::builder().build() } } -#[cfg(not(feature = "experimental_cache"))] -#[cfg(test)] -#[buildstructor::buildstructor] -impl Supergraph { - #[builder] - pub(crate) fn fake_new( - listen: Option, - path: Option, - introspection: Option, - preview_defer_support: Option, - ) -> Self { - Self { - listen: listen.unwrap_or_else(test_listen), - path: path.unwrap_or_else(default_graphql_path), - introspection: introspection.unwrap_or_else(default_graphql_introspection), - preview_defer_support: preview_defer_support.unwrap_or_else(default_defer_support), - } - } +#[derive(Debug, Clone, Default, Deserialize, Serialize, JsonSchema)] +#[serde(deny_unknown_fields)] +pub(crate) struct Apq { + pub(crate) experimental_cache: Cache, } -impl Supergraph { +#[derive(Debug, Clone, Default, Deserialize, Serialize, JsonSchema)] +#[serde(deny_unknown_fields)] +pub(crate) struct QueryPlanning { + pub(crate) experimental_cache: Cache, +} + +#[derive(Debug, Clone, Default, Deserialize, Serialize, JsonSchema)] +#[serde(deny_unknown_fields)] + +pub(crate) struct Cache { + /// Configures the in memory cache (always active) + pub(crate) in_memory: InMemoryCache, #[cfg(feature = "experimental_cache")] - pub(crate) fn cache(&self) -> Option> { - self.cache_redis_urls.clone() - } + /// Configures and activates the Redis cache + pub(crate) redis: Option, +} - #[cfg(not(feature = "experimental_cache"))] - pub(crate) fn cache(&self) -> Option> { - None - } +#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema)] +#[serde(deny_unknown_fields)] +/// In memory cache configuration +pub(crate) struct InMemoryCache { + /// Number of entries in the Least Recently Used cache + pub(crate) limit: usize, } -impl Default for Supergraph { +impl Default for InMemoryCache { fn default() -> Self { - Self::builder().build() + Self { + limit: DEFAULT_CACHE_CAPACITY, + } } } +#[cfg(feature = "experimental_cache")] +#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema)] +#[serde(deny_unknown_fields)] +/// Redis cache configuration +pub(crate) struct RedisCache { + /// List of URLs to the Redis cluster + pub(crate) urls: Vec, +} + /// Configuration options pertaining to the sandbox page. 
#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema)] #[serde(deny_unknown_fields)] diff --git a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap index 9bb02fa171..1b7cf9b2fb 100644 --- a/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap +++ b/apollo-router/src/configuration/snapshots/apollo_router__configuration__tests__schema_generation.snap @@ -605,10 +605,66 @@ expression: "&schema" "listen": "127.0.0.1:4000", "path": "/", "introspection": false, - "preview_defer_support": true + "preview_defer_support": true, + "apq": { + "experimental_cache": { + "in_memory": { + "limit": 512 + } + } + }, + "query_planning": { + "experimental_cache": { + "in_memory": { + "limit": 512 + } + } + } }, "type": "object", "properties": { + "apq": { + "description": "Configures automatic persisted queries", + "default": { + "experimental_cache": { + "in_memory": { + "limit": 512 + } + } + }, + "type": "object", + "required": [ + "experimental_cache" + ], + "properties": { + "experimental_cache": { + "type": "object", + "required": [ + "in_memory" + ], + "properties": { + "in_memory": { + "description": "Configures the in memory cache (always active)", + "type": "object", + "required": [ + "limit" + ], + "properties": { + "limit": { + "description": "Number of entries in the Least Recently Used cache", + "type": "integer", + "format": "uint", + "minimum": 0.0 + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + }, "introspection": { "description": "Enable introspection Default: false", "default": false, @@ -636,6 +692,48 @@ expression: "&schema" "preview_defer_support": { "default": true, "type": "boolean" + }, + "query_planning": { + "description": "Query planning options", + "default": { + "experimental_cache": { + "in_memory": { + "limit": 512 + } + } + }, + "type": "object", + "required": [ + "experimental_cache" + ], + "properties": { + "experimental_cache": { + "type": "object", + "required": [ + "in_memory" + ], + "properties": { + "in_memory": { + "description": "Configures the in memory cache (always active)", + "type": "object", + "required": [ + "limit" + ], + "properties": { + "limit": { + "description": "Number of entries in the Least Recently Used cache", + "type": "integer", + "format": "uint", + "minimum": 0.0 + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false } }, "additionalProperties": false diff --git a/apollo-router/src/introspection.rs b/apollo-router/src/introspection.rs index 3075e4c3de..1dbbcc3770 100644 --- a/apollo-router/src/introspection.rs +++ b/apollo-router/src/introspection.rs @@ -19,24 +19,15 @@ pub(crate) struct Introspection { } impl Introspection { - pub(crate) async fn with_capacity( - configuration: &Configuration, - capacity: usize, - redis_urls: Option>, - ) -> Self { + pub(crate) async fn with_capacity(configuration: &Configuration, capacity: usize) -> Self { Self { - cache: CacheStorage::new(capacity, redis_urls).await, + cache: CacheStorage::new(capacity, None).await, defer_support: configuration.supergraph.preview_defer_support, } } pub(crate) async fn new(configuration: &Configuration) -> Self { - Self::with_capacity( - configuration, - DEFAULT_INTROSPECTION_CACHE_CAPACITY, - configuration.supergraph.cache(), - 
) - .await + Self::with_capacity(configuration, DEFAULT_INTROSPECTION_CACHE_CAPACITY).await } #[cfg(test)] @@ -44,8 +35,7 @@ impl Introspection { configuration: &Configuration, cache: HashMap, ) -> Self { - let this = - Self::with_capacity(configuration, cache.len(), configuration.supergraph.cache()).await; + let this = Self::with_capacity(configuration, cache.len()).await; for (query, response) in cache.into_iter() { this.cache.insert(query, response).await; diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs index bb5cfab671..da2ed9c789 100644 --- a/apollo-router/src/query_planner/caching_query_planner.rs +++ b/apollo-router/src/query_planner/caching_query_planner.rs @@ -35,11 +35,11 @@ where /// Creates a new query planner that caches the results of another [`QueryPlanner`]. pub(crate) async fn new( delegate: T, - plan_cache_limit: usize, schema_id: Option, - redis_urls: Option>, + config: &crate::configuration::QueryPlanning, ) -> CachingQueryPlanner { - let cache = Arc::new(DeduplicatingCache::with_capacity(plan_cache_limit, redis_urls).await); + let cache = + Arc::new(DeduplicatingCache::from_configuration(&config.experimental_cache).await); Self { cache, delegate, @@ -262,7 +262,12 @@ mod tests { planner }); - let mut planner = CachingQueryPlanner::new(delegate, 10, None, None).await; + let mut planner = CachingQueryPlanner::new( + delegate, + None, + &crate::configuration::QueryPlanning::default(), + ) + .await; for _ in 0..5 { assert!(planner @@ -318,7 +323,12 @@ mod tests { planner }); - let mut planner = CachingQueryPlanner::new(delegate, 10, None, None).await; + let mut planner = CachingQueryPlanner::new( + delegate, + None, + &crate::configuration::QueryPlanning::default(), + ) + .await; for _ in 0..5 { assert!(planner diff --git a/apollo-router/src/router.rs b/apollo-router/src/router.rs index 88d583f5c4..41352ca776 100644 --- a/apollo-router/src/router.rs +++ b/apollo-router/src/router.rs @@ -36,7 +36,6 @@ use self::Event::UpdateSchema; use crate::axum_factory::make_axum_router; use crate::axum_factory::AxumHttpServerFactory; use crate::axum_factory::ListenAddrAndRouter; -use crate::cache::DeduplicatingCache; use crate::configuration::Configuration; use crate::configuration::ListenAddr; use crate::plugin::DynPlugin; @@ -65,7 +64,7 @@ async fn make_transport_service( .create(configuration.clone(), schema, None, Some(extra_plugins)) .await?; - let apq = APQLayer::with_cache(DeduplicatingCache::new().await); + let apq = APQLayer::new().await; let web_endpoints = service_factory.web_endpoints(); let routers = make_axum_router(service_factory, &configuration, web_endpoints, apq)?; // FIXME: how should diff --git a/apollo-router/src/services/supergraph_service.rs b/apollo-router/src/services/supergraph_service.rs index c409359174..4018f25698 100644 --- a/apollo-router/src/services/supergraph_service.rs +++ b/apollo-router/src/services/supergraph_service.rs @@ -315,12 +315,6 @@ impl PluggableSupergraphServiceBuilder { let configuration = self.configuration.unwrap_or_default(); - let plan_cache_limit = std::env::var("ROUTER_PLAN_CACHE_LIMIT") - .ok() - .and_then(|x| x.parse().ok()) - .unwrap_or(100); - let redis_urls = configuration.supergraph.cache(); - let introspection = if configuration.supergraph.introspection { Some(Arc::new(Introspection::new(&configuration).await)) } else { @@ -329,14 +323,13 @@ impl PluggableSupergraphServiceBuilder { // QueryPlannerService takes an UnplannedRequest and outputs 
PlannedRequest let bridge_query_planner = - BridgeQueryPlanner::new(self.schema.clone(), introspection, configuration) + BridgeQueryPlanner::new(self.schema.clone(), introspection, configuration.clone()) .await .map_err(ServiceBuildError::QueryPlannerError)?; let query_planner_service = CachingQueryPlanner::new( bridge_query_planner, - plan_cache_limit, self.schema.schema_id.clone(), - redis_urls, + &configuration.supergraph.query_planning, ) .await; diff --git a/docs/source/configuration/caching.mdx b/docs/source/configuration/caching.mdx new file mode 100644 index 0000000000..5a31a430bb --- /dev/null +++ b/docs/source/configuration/caching.mdx @@ -0,0 +1,42 @@ +--- +title: Caching in the Apollo Router +--- + +The Apollo Router comes with an in-memory cache, used to store Automated Persisted Queries (APQ) and query plans. +This is a Least Recently Used (LRU) cache, that can be configured as follows: + +```yaml title="router.yaml" +supergraph: + apq: + experimental_cache: + in_memory: + limit: 512 + query_planning: + experimental_cache: + in_memory: + limit: 512 +``` + +Introspection responses are cached too, but that cache is not configurable for now. + +## Experimental Redis cache + +The Apollo Router has an experimental external storage cache, using Redis Cluster. It can be tested by building a custom Router binary, with the Cargo feature `experimental_cache`. + +This will activate a configuration option to connect to a Redis Cluster: + +```yaml +supergraph: + apq: + experimental_cache: + in_memory: + limit: 512 + redis: + urls: ["redis://..."] + query_planning: + experimental_cache: + in_memory: + limit: 512 + redis: + urls: ["redis://..."] +``` \ No newline at end of file diff --git a/docs/source/configuration/overview.mdx b/docs/source/configuration/overview.mdx index 2a3d8043a2..65e45000e3 100644 --- a/docs/source/configuration/overview.mdx +++ b/docs/source/configuration/overview.mdx @@ -368,7 +368,7 @@ See [Tracing in the Apollo Router](./tracing/). ### Automatic persisted queries (APQ) Automatic Persisted Queries (APQ) enable GraphQL clients to send a server the _hash_ of their query string, _instead of_ the query string itself. This can significantly reduce network usage for very large query strings. -The Apollo Router automatically supports APQ via its in-memory cache. **No configuration options are supported at this time.** Support for external data stores like Redis and Memcached will be supported in a future release. +The Apollo Router automatically supports APQ via its in-memory cache. See the [caching documentation](./caching) for related options. For more information on APQ, including client configuration, see [this article](/apollo-server/performance/apq/). 
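As an illustrative sketch only (assuming the `sha2` and `serde_json` crates; this is not router or client-library code), the hash an APQ-capable client sends is the hex-encoded SHA-256 digest of the exact query string, wrapped in the standard `persistedQuery` extension:

```rust
use sha2::{Digest, Sha256};

fn apq_request_body(query: &str) -> serde_json::Value {
    // Hex-encode the SHA-256 digest of the exact query string.
    let digest = Sha256::digest(query.as_bytes());
    let hash: String = digest.iter().map(|byte| format!("{:02x}", byte)).collect();

    serde_json::json!({
        // On a cache miss the server responds with a PersistedQueryNotFound error
        // and the client retries with both the full `query` and this extension.
        "extensions": {
            "persistedQuery": {
                "version": 1,
                "sha256Hash": hash
            }
        }
    })
}

fn main() {
    println!("{}", apq_request_body("{ __typename }"));
}
```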
From d8f9625d8ca7f1052acdf02c2ed5b0b6366e5047 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 15:44:25 +0000 Subject: [PATCH 30/45] chore(deps): update all non-major packages >= 1.0 --- Cargo.lock | 16 ++++++++-------- apollo-router/Cargo.toml | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dc6f894466..13d7edb566 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2287,9 +2287,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.21.1" +version = "1.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba1e75aa1530e7385af7b2685478dece08dafb9db3b4225c753286decea83bef" +checksum = "261bf85ed492cd1c47c9ba675e48649682a9d2d2e77f515c5386d7726fb0ba76" dependencies = [ "console 0.15.2", "lazy_static", @@ -4412,9 +4412,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.147" +version = "1.0.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" +checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc" dependencies = [ "serde_derive", ] @@ -4431,9 +4431,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.147" +version = "1.0.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" +checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c" dependencies = [ "proc-macro2", "quote", @@ -4808,9 +4808,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.103" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" +checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce" dependencies = [ "proc-macro2", "quote", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 671032a8bf..a496f9f423 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -158,7 +158,7 @@ rust-embed="6.4.2" schemars = { version = "0.8.11", features = ["url"] } shellexpand = "2.1.2" sha2 = "0.10.6" -serde = { version = "1.0.147", features = ["derive", "rc"] } +serde = { version = "1.0.148", features = ["derive", "rc"] } serde_json_bytes = { version = "0.2.0", features = ["preserve_order"] } serde_json = { version = "1.0.89", features = ["preserve_order"] } serde_urlencoded = "0.7.1" @@ -202,7 +202,7 @@ uname = "0.1.1" uname = "0.1.1" [dev-dependencies] -insta = { version = "1.21.1", features = ["json", "redactions"] } +insta = { version = "1.21.2", features = ["json", "redactions"] } introspector-gadget = "0.1.0" maplit = "1.0.2" memchr = { version = "2.5.0", default-features = false } From c1dea9cda09d856e11a9112170090d6b60a0ec75 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 28 Nov 2022 18:58:04 +0000 Subject: [PATCH 31/45] fix(deps): update dependency @apollo/server to v4.2.2 --- dockerfiles/fed2-demo-gateway/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dockerfiles/fed2-demo-gateway/package.json b/dockerfiles/fed2-demo-gateway/package.json index 6ca965a8fe..48e53c6bd6 100644 --- a/dockerfiles/fed2-demo-gateway/package.json +++ b/dockerfiles/fed2-demo-gateway/package.json @@ -7,7 +7,7 @@ "start": "node 
gateway.js" }, "dependencies": { - "@apollo/server": "4.2.1", + "@apollo/server": "4.2.2", "@apollo/gateway": "2.2.1", "supergraph-demo-opentelemetry": "0.2.4", "graphql": "16.6.0" From 6cbbec1c4b4d6f6400ed3b1000c7dbef279731c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9e=20Kooi?= Date: Tue, 29 Nov 2022 13:36:39 +0100 Subject: [PATCH 32/45] update apollo-rs (#2177) the change from `impl From` to `impl TryFrom<$ParserType> for $RustType` affects the router. previously `parse_value` could panic because of apollo-parser, now it will just return None if the inputs are wrong. there are now two versions of apollo-encoder in the dep tree, until https://github.com/apollographql/introspector-gadget/pull/5 is merged --- Cargo.lock | 45 ++++++++++++++++++------- NEXT_CHANGELOG.md | 6 +++- apollo-router/Cargo.toml | 2 +- apollo-router/src/spec/query.rs | 12 +++---- examples/supergraph-sdl/rust/Cargo.toml | 4 +-- fuzz/Cargo.toml | 4 +-- fuzz/src/lib.rs | 6 +++- 7 files changed, 53 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 13d7edb566..645fb5023c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -108,11 +108,11 @@ checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" [[package]] name = "apollo-compiler" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8469cb8f7f42ad9b062e70f1148320e6cb7d727d2e87ca46635633ca6b2e6e4d" +checksum = "c4e8b67b10b590ac58e555af24b7cc7863deacb4c7bc6ae2efd60b0256a2b654" dependencies = [ - "apollo-parser", + "apollo-parser 0.4.0", "miette 4.7.1", "ordered-float 2.10.0", "rowan", @@ -127,7 +127,17 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b17d38f06e92256e9b0b271b878e20309822a587b2acfa234a60d36d92b6b43" dependencies = [ - "apollo-parser", + "apollo-parser 0.3.2", + "thiserror", +] + +[[package]] +name = "apollo-encoder" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "555c85cfb5672ee5d5925db34c15b8bc53e97bf5c67eb0a75d54ee9fe51ec8f0" +dependencies = [ + "apollo-parser 0.4.0", "thiserror", ] @@ -140,6 +150,16 @@ dependencies = [ "rowan", ] +[[package]] +name = "apollo-parser" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bceda0395fd9cf784b4c6bb4adbaee52706ed7cbe7d2403e77e62cdc760145d2" +dependencies = [ + "rowan", + "thiserror", +] + [[package]] name = "apollo-router" version = "1.4.0" @@ -147,7 +167,7 @@ dependencies = [ "access-json", "ansi_term", "anyhow", - "apollo-parser", + "apollo-parser 0.4.0", "askama", "async-compression", "async-trait", @@ -287,14 +307,15 @@ dependencies = [ [[package]] name = "apollo-smith" -version = "0.2.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "796458df9954e3f7ad451dd231d3d369296d349371878a31b9641470989bb112" +checksum = "e9c717390e188a27cbbe09c76042332bf6cc3bb6a83a73d71c0bceb6f2d73cb9" dependencies = [ - "apollo-encoder", - "apollo-parser", + "apollo-encoder 0.4.0", + "apollo-parser 0.4.0", "arbitrary", "once_cell", + "thiserror", ] [[package]] @@ -2322,7 +2343,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a6a5345dd15741225868a205140b730de97b4f00b7d22a8520ae9d3e6266518" dependencies = [ - "apollo-encoder", + "apollo-encoder 0.3.4", "backoff", "graphql_client", "humantime", @@ -4073,7 +4094,7 @@ dependencies = [ name = "router-fuzz" version = "0.0.0" dependencies = [ 
- "apollo-parser", + "apollo-parser 0.4.0", "apollo-smith", "env_logger", "libfuzzer-sys", @@ -4768,7 +4789,7 @@ version = "0.1.0" dependencies = [ "anyhow", "apollo-compiler", - "apollo-parser", + "apollo-parser 0.4.0", "apollo-router", "async-trait", "futures", diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index fff921e376..7494558799 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -261,13 +261,17 @@ By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/p ## 🛠 Maintenance - ### Refactor APQ ([PR #2129](https://github.com/apollographql/router/pull/2129)) Remove duplicated code. By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2129 +### Update apollo-rs ([PR #2177](https://github.com/apollographql/router/pull/2177)) + +Updates to new apollo-rs APIs, and fixes some potential panics on unexpected user input. + +By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/2177 ## 📚 Documentation diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index a496f9f423..9dbc554f44 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -41,7 +41,7 @@ features = ["docs_rs"] access-json = "0.1.0" anyhow = "1.0.66" ansi_term = "0.12" -apollo-parser = "0.3.2" +apollo-parser = "0.4.0" async-compression = { version = "0.3.15", features = [ "tokio", "brotli", diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs index 5c11a3bdc7..956ebd8070 100644 --- a/apollo-router/src/spec/query.rs +++ b/apollo-router/src/spec/query.rs @@ -209,10 +209,8 @@ impl Query { ) -> Result { let string = query.into(); - let parser = apollo_parser::Parser::with_recursion_limit( - string.as_str(), - configuration.server.experimental_parser_recursion_limit, - ); + let parser = apollo_parser::Parser::new(string.as_str()) + .recursion_limit(configuration.server.experimental_parser_recursion_limit); let tree = parser.parse(); // Trace log recursion limit data @@ -1112,8 +1110,8 @@ fn parse_default_value(definition: &ast::VariableDefinition) -> Option { pub(crate) fn parse_value(value: &ast::Value) -> Option { match value { ast::Value::Variable(_) => None, - ast::Value::StringValue(s) => Some(String::from(s).into()), - ast::Value::FloatValue(f) => Some(f64::from(f).into()), + ast::Value::StringValue(s) => String::try_from(s).ok().map(Into::into), + ast::Value::FloatValue(f) => f64::try_from(f).ok().map(Into::into), ast::Value::IntValue(i) => { let s = i.source_string(); s.parse::() @@ -1121,7 +1119,7 @@ pub(crate) fn parse_value(value: &ast::Value) -> Option { .map(Into::into) .or_else(|| s.parse::().ok().map(Into::into)) } - ast::Value::BooleanValue(b) => Some(bool::from(b).into()), + ast::Value::BooleanValue(b) => bool::try_from(b).ok().map(Into::into), ast::Value::NullValue(_) => Some(Value::Null), ast::Value::EnumValue(e) => e.name().map(|n| n.text().to_string().into()), ast::Value::ListValue(l) => l diff --git a/examples/supergraph-sdl/rust/Cargo.toml b/examples/supergraph-sdl/rust/Cargo.toml index 8b711f4819..a5a9c81026 100644 --- a/examples/supergraph-sdl/rust/Cargo.toml +++ b/examples/supergraph-sdl/rust/Cargo.toml @@ -6,8 +6,8 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] anyhow = "1" -apollo-compiler = "0.3.0" -apollo-parser = "0.3.2" +apollo-compiler = "0.4.0" +apollo-parser = "0.4.0" apollo-router = { path = "../../../apollo-router" } async-trait = "0.1" futures = "0.3" 
diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml index 9e02e98d13..3da0c2411c 100644 --- a/fuzz/Cargo.toml +++ b/fuzz/Cargo.toml @@ -11,8 +11,8 @@ cargo-fuzz = true [dependencies] libfuzzer-sys = "0.4" -apollo-smith = { version = "0.2.0", features = ["parser-impl"] } -apollo-parser = "0.3.2" +apollo-smith = { version = "0.3.1", features = ["parser-impl"] } +apollo-parser = "0.4.0" env_logger = "0.9.3" log = "0.4" reqwest = { version = "0.11", features = ["json", "blocking"] } diff --git a/fuzz/src/lib.rs b/fuzz/src/lib.rs index ed5415c334..b22056214c 100644 --- a/fuzz/src/lib.rs +++ b/fuzz/src/lib.rs @@ -1,5 +1,6 @@ // The fuzzer won't compile on windows as of 1.63.0 #![cfg(not(windows))] +use std::convert::TryFrom; use std::fs; use apollo_parser::Parser; @@ -29,7 +30,10 @@ pub fn generate_valid_operation(input: &[u8], schema_path: &'static str) -> Resu } let mut u = Unstructured::new(input); - let mut gql_doc = DocumentBuilder::with_document(&mut u, Document::from(tree.document()))?; + let mut gql_doc = DocumentBuilder::with_document( + &mut u, + Document::try_from(tree.document()).expect("tree should not have errors"), + )?; let operation_def = gql_doc.operation_definition()?.unwrap(); Ok(operation_def.into()) From 736066d42724980c39a68748804ea950791ce42f Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 29 Nov 2022 09:20:48 +0000 Subject: [PATCH 33/45] fix(deps): update rust crate async-trait to 0.1.59 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 645fb5023c..204cce33db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -446,9 +446,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.58" +version = "0.1.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" +checksum = "31e6e93155431f3931513b243d371981bb2770112b370c82745a1d19d2f99364" dependencies = [ "proc-macro2", "quote", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 9dbc554f44..680e383837 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -48,7 +48,7 @@ async-compression = { version = "0.3.15", features = [ "gzip", "deflate", ] } -async-trait = "0.1.58" +async-trait = "0.1.59" atty = "0.2.14" axum = { version = "0.5.17", features = ["headers", "json", "original-uri"] } backtrace = "0.3.66" From 3b75a3596d6ce05f2a0f5d299864760d657dd7d1 Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Thu, 1 Dec 2022 09:36:39 +0000 Subject: [PATCH 34/45] verify that deferred fragment acts as a boundary for nullability rules (#2183) fixes: #2169 Add a new test to confirm this is the case. 
--- NEXT_CHANGELOG.md | 10 +- ...eferred_fragment_bounds_nullability-2.snap | 129 ++++++++++++++++++ ..._deferred_fragment_bounds_nullability.snap | 25 ++++ .../src/services/supergraph_service.rs | 82 +++++++++++ 4 files changed, 244 insertions(+), 2 deletions(-) create mode 100644 apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__deferred_fragment_bounds_nullability-2.snap create mode 100644 apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__deferred_fragment_bounds_nullability.snap diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 7494558799..42cd36ad59 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -26,7 +26,7 @@ By [@USERNAME](https://github.com/USERNAME) in https://github.com/apollographql/ # [x.x.x] (unreleased) - 2022-mm-dd ## ❗ BREAKING ❗ -### Router debug Docker images now run under the control of heaptrack ([Issue #2135](https://github.com/apollographql/router/pull/2142)) +### Router debug Docker images now run under the control of heaptrack ([Issue #2135](https://github.com/apollographql/router/issues/2135)) From the next release, our debug Docker image will invoke the router under the control of heaptrack. We are making this change to make it simple for users to investigate potential memory issues with the router. @@ -115,7 +115,7 @@ telemetry: headers: true ``` -### Provide multi-arch (amd64/arm64) Docker images for the Router ([Issue #1932](https://github.com/apollographql/router/pull/2138)) +### Provide multi-arch (amd64/arm64) Docker images for the Router ([Issue #1932](https://github.com/apollographql/router/issues/1932)) From the next release, our Docker images will be multi-arch. @@ -261,6 +261,12 @@ By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/p ## 🛠 Maintenance +### Verify that deferred fragment acts as a boundary for nullability rules ([Issue #2169](https://github.com/apollographql/router/issues/2169)) + +Add a test to ensure that deferred fragments act as a boundary for nullability rules. + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2183 + ### Refactor APQ ([PR #2129](https://github.com/apollographql/router/pull/2129)) Remove duplicated code. 
diff --git a/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__deferred_fragment_bounds_nullability-2.snap b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__deferred_fragment_bounds_nullability-2.snap new file mode 100644 index 0000000000..acd2170b0b --- /dev/null +++ b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__deferred_fragment_bounds_nullability-2.snap @@ -0,0 +1,129 @@ +--- +source: apollo-router/src/services/supergraph_service.rs +expression: stream.next_response().await.unwrap() +--- +{ + "hasNext": false, + "incremental": [ + { + "data": null, + "path": [ + "currentUser", + "activeOrganization", + "suborga", + 0 + ], + "extensions": { + "valueCompletion": [ + { + "message": "Cannot return null for non-nullable field Organization.nonNullId", + "path": [ + "currentUser", + "activeOrganization", + "suborga", + 0 + ] + }, + { + "message": "Cannot return null for non-nullable field Organization.nonNullId", + "path": [ + "currentUser", + "activeOrganization", + "suborga", + 1 + ] + }, + { + "message": "Cannot return null for non-nullable field Organization.nonNullId", + "path": [ + "currentUser", + "activeOrganization", + "suborga", + 2 + ] + } + ] + } + }, + { + "data": null, + "path": [ + "currentUser", + "activeOrganization", + "suborga", + 1 + ], + "extensions": { + "valueCompletion": [ + { + "message": "Cannot return null for non-nullable field Organization.nonNullId", + "path": [ + "currentUser", + "activeOrganization", + "suborga", + 0 + ] + }, + { + "message": "Cannot return null for non-nullable field Organization.nonNullId", + "path": [ + "currentUser", + "activeOrganization", + "suborga", + 1 + ] + }, + { + "message": "Cannot return null for non-nullable field Organization.nonNullId", + "path": [ + "currentUser", + "activeOrganization", + "suborga", + 2 + ] + } + ] + } + }, + { + "data": null, + "path": [ + "currentUser", + "activeOrganization", + "suborga", + 2 + ], + "extensions": { + "valueCompletion": [ + { + "message": "Cannot return null for non-nullable field Organization.nonNullId", + "path": [ + "currentUser", + "activeOrganization", + "suborga", + 0 + ] + }, + { + "message": "Cannot return null for non-nullable field Organization.nonNullId", + "path": [ + "currentUser", + "activeOrganization", + "suborga", + 1 + ] + }, + { + "message": "Cannot return null for non-nullable field Organization.nonNullId", + "path": [ + "currentUser", + "activeOrganization", + "suborga", + 2 + ] + } + ] + } + } + ] +} diff --git a/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__deferred_fragment_bounds_nullability.snap b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__deferred_fragment_bounds_nullability.snap new file mode 100644 index 0000000000..7f3a7ac9be --- /dev/null +++ b/apollo-router/src/services/snapshots/apollo_router__services__supergraph_service__tests__deferred_fragment_bounds_nullability.snap @@ -0,0 +1,25 @@ +--- +source: apollo-router/src/services/supergraph_service.rs +expression: stream.next_response().await.unwrap() +--- +{ + "data": { + "currentUser": { + "activeOrganization": { + "id": "0", + "suborga": [ + { + "id": "1" + }, + { + "id": "2" + }, + { + "id": "3" + } + ] + } + } + }, + "hasNext": true +} diff --git a/apollo-router/src/services/supergraph_service.rs b/apollo-router/src/services/supergraph_service.rs index 4018f25698..da3c5c9840 100644 --- 
a/apollo-router/src/services/supergraph_service.rs +++ b/apollo-router/src/services/supergraph_service.rs @@ -609,6 +609,88 @@ mod tests { insta::assert_json_snapshot!(stream.next_response().await.unwrap()); } + #[tokio::test] + async fn deferred_fragment_bounds_nullability() { + let subgraphs = MockedSubgraphs([ + ("user", MockSubgraph::builder().with_json( + serde_json::json!{{"query":"{currentUser{activeOrganization{__typename id}}}"}}, + serde_json::json!{{"data": {"currentUser": { "activeOrganization": { "__typename": "Organization", "id": "0" } }}}} + ).build()), + ("orga", MockSubgraph::builder().with_json( + serde_json::json!{{ + "query":"query($representations:[_Any!]!){_entities(representations:$representations){...on Organization{suborga{__typename id}}}}", + "variables": { + "representations":[{"__typename": "Organization", "id":"0"}] + } + }}, + serde_json::json!{{ + "data": { + "_entities": [{ "suborga": [ + { "__typename": "Organization", "id": "1"}, + { "__typename": "Organization", "id": "2"}, + { "__typename": "Organization", "id": "3"}, + ] }] + }, + }} + ) + .with_json( + serde_json::json!{{ + "query":"query($representations:[_Any!]!){_entities(representations:$representations){...on Organization{name}}}", + "variables": { + "representations":[ + {"__typename": "Organization", "id":"1"}, + {"__typename": "Organization", "id":"2"}, + {"__typename": "Organization", "id":"3"} + + ] + } + }}, + serde_json::json!{{ + "data": { + "_entities": [ + { "__typename": "Organization", "id": "1"}, + { "__typename": "Organization", "id": "2", "name": "A"}, + { "__typename": "Organization", "id": "3"}, + ] + }, + "errors": [ + { + "message": "error orga 1", + "path": ["_entities", 0], + }, + { + "message": "error orga 3", + "path": ["_entities", 2], + } + ] + }} + ).build()) + ].into_iter().collect()); + + let service = TestHarness::builder() + .configuration_json(serde_json::json!({"include_subgraph_errors": { "all": true } })) + .unwrap() + .schema(SCHEMA) + .extra_plugin(subgraphs) + .build() + .await + .unwrap(); + + let request = supergraph::Request::fake_builder() + .header("Accept", "multipart/mixed; deferSpec=20220824") + .query( + "query { currentUser { activeOrganization { id suborga { id ...@defer { nonNullId } } } } }", + ) + .build() + .unwrap(); + + let mut stream = service.oneshot(request).await.unwrap(); + + insta::assert_json_snapshot!(stream.next_response().await.unwrap()); + + insta::assert_json_snapshot!(stream.next_response().await.unwrap()); + } + #[tokio::test] async fn errors_on_incremental_responses() { let subgraphs = MockedSubgraphs([ From 80b504767f3368b1b778ef3c0c88d440d6552db9 Mon Sep 17 00:00:00 2001 From: Gary Pennington Date: Thu, 1 Dec 2022 10:18:37 +0000 Subject: [PATCH 35/45] chore: update protobuf (#2187) it's changed... --- .../src/spaceport/proto/reports.proto | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/apollo-router/src/spaceport/proto/reports.proto b/apollo-router/src/spaceport/proto/reports.proto index 831f3abaf3..26fe614cc1 100644 --- a/apollo-router/src/spaceport/proto/reports.proto +++ b/apollo-router/src/spaceport/proto/reports.proto @@ -113,11 +113,11 @@ message Trace { // services may not be unique. See https://github.com/apollographql/federation/blob/main/query-planner-js/src/QueryPlan.ts // for more information and details. 
message QueryPlanNode { - // This represents a set of nodes to be executed sequentially by the Gateway executor + // This represents a set of nodes to be executed sequentially by the Router/Gateway executor message SequenceNode { repeated QueryPlanNode nodes = 1; } - // This represents a set of nodes to be executed in parallel by the Gateway executor + // This represents a set of nodes to be executed in parallel by the Router/Gateway executor message ParallelNode { repeated QueryPlanNode nodes = 1; } @@ -131,14 +131,14 @@ message Trace { bool trace_parsing_failed = 2; // This Trace only contains start_time, end_time, duration_ns, and root; - // all timings were calculated **on the federated service**, and clock skew + // all timings were calculated **on the subgraph**, and clock skew // will be handled by the ingress server. Trace trace = 3; - // relative to the outer trace's start_time, in ns, measured in the gateway. + // relative to the outer trace's start_time, in ns, measured in the Router/Gateway. uint64 sent_time_offset = 4; - // Wallclock times measured in the gateway for when this operation was + // Wallclock times measured in the Router/Gateway for when this operation was // sent and received. google.protobuf.Timestamp sent_time = 5; google.protobuf.Timestamp received_time = 6; @@ -169,7 +169,7 @@ message Trace { message DeferredNode { repeated DeferredNodeDepends depends = 1; string label = 2; - ResponsePathElement path = 3; + repeated ResponsePathElement path = 3; QueryPlanNode node = 4; } message DeferredNodeDepends { @@ -206,7 +206,7 @@ message Trace { // If this is true, the trace is potentially missing some nodes that were // present on the query plan. This can happen if the trace span buffer used - // in the router fills up and some spans have to be dropped. In these cases + // in the Router fills up and some spans have to be dropped. In these cases // the overall trace timing will still be correct, but the trace data could // be missing some referenced or executed fields, and some nodes may be // missing. If this is true we should display a warning to the user when they @@ -214,8 +214,8 @@ message Trace { bool is_incomplete = 33; // ------------------------------------------------------------------------- - // Fields below this line are *not* included in federated traces (the traces - // sent from federated services to the gateway). + // Fields below this line are *not* included in inline traces (the traces + // sent from subgraphs to the Router/Gateway). // In addition to details.raw_query, we include a "signature" of the query, // which can be normalized: for example, you may want to discard aliases, drop @@ -244,10 +244,10 @@ message Trace { CachePolicy cache_policy = 18; - // If this Trace was created by a gateway, this is the query plan, including - // sub-Traces for federated services. Note that the 'root' tree on the + // If this Trace was created by a Router/Gateway, this is the query plan, including + // sub-Traces for subgraphs. Note that the 'root' tree on the // top-level Trace won't contain any resolvers (though it could contain errors - // that occurred in the gateway itself). + // that occurred in the Router/Gateway itself). QueryPlanNode query_plan = 26; // Was this response served from a full query response cache? 
(In that case From 4360256a845cc02c87ee19c5f1af43377ced1855 Mon Sep 17 00:00:00 2001 From: Coenen Benjamin Date: Fri, 2 Dec 2022 10:31:50 +0100 Subject: [PATCH 36/45] fix: panic when dev mode enabled with empty config file (#2195) close #2182 Signed-off-by: Benjamin Coenen <5719034+bnjjj@users.noreply.github.com> --- NEXT_CHANGELOG.md | 6 ++++++ apollo-router/src/configuration/mod.rs | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 42cd36ad59..5c7a0508b9 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -240,6 +240,12 @@ By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/p ## 🐛 Fixes +### Fix panic when dev mode enabled with empty config file ([Issue #2182](https://github.com/apollographql/router/issues/2182)) + +If you're running the Router with dev mode with an empty config file, it will no longer panic + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2165 + ### fix build_docker_image.sh script when using default repo ([PR #2163](https://github.com/apollographql/router/pull/2163)) Adding the `-r` flag recently broke the existing functionality to build from the default repo using `-b`. This fixes that. diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index 30ed5dab67..721004b4ee 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -217,8 +217,8 @@ impl Configuration { // Enable experimental_response_trace_id self.apollo_plugins .plugins - .get_mut("telemetry") - .expect("telemetry plugin must be initialized at this point") + .entry("telemetry") + .or_insert_with(|| json!({})) .as_object_mut() .expect("configuration for telemetry must be an object") .entry("tracing") From fda9f15985cfd740a492b1756ea12b9eaa70da32 Mon Sep 17 00:00:00 2001 From: Jeremy Lempereur Date: Fri, 2 Dec 2022 10:40:31 +0100 Subject: [PATCH 37/45] Docs: Add a disclaimer for users who set up health-checks and prometheus endpoints in a containers environment. (#2194) fixes #2079 The health check and the prometheus endpoint listen to 127.0.0.1 by default. While this is a safe default, it prevents other pods from performing healthchecks and scrapping prometheus data. This behavior and customization is now documented. Co-authored-by: Gary Pennington --- NEXT_CHANGELOG.md | 10 +++++++++- docs/source/configuration/health-checks.mdx | 12 ++++++++++++ docs/source/configuration/metrics.mdx | 17 +++++++++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 5c7a0508b9..88956f16cf 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -291,7 +291,7 @@ By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apol The docs CORS regex example now displays a working and safe way to allow `HTTPS` subdomains of `api.example.com`. -By [@col](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/2152 +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/2152 ### update documentation to reflect new examples structure ([Issue #2095](https://github.com/apollographql/router/issues/2095)) @@ -300,3 +300,11 @@ We recently updated the examples directory structure. 
This fixes the documentati By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2133 + +### Docs: Add a disclaimer for users who set up health-checks and prometheus endpoints in a containers environment ([Issue #2079](https://github.com/apollographql/router/issues/2079)) + +The health check and the prometheus endpoint listen to 127.0.0.1 by default. +While this is a safe default, it prevents other pods from performing healthchecks and scraping prometheus data. +This behavior and customization is now documented in the [health-checks](https://www.apollographql.com/docs/router/configuration/health-checks) and the [prometheus](https://www.apollographql.com/docs/router/configuration/metrics#using-prometheus) sections. + +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/2194 diff --git a/docs/source/configuration/health-checks.mdx b/docs/source/configuration/health-checks.mdx index e0d6863b3b..0db371e4b7 100644 --- a/docs/source/configuration/health-checks.mdx +++ b/docs/source/configuration/health-checks.mdx @@ -37,6 +37,18 @@ $ curl -v "http://127.0.0.1:8088/health" {"status":"UP"} ``` +## Using in a containers environment + +The health-check listens to 127.0.0.1 by default, which won't allow connections issued from a network. +While this is a safe default, *other containers won't be able to perform healthchecks*, which will prevent the router pod from switching to a healthy state. + +You can change this by setting `health-check`: +```yaml title="router.yaml" +health-check: + listen: 0.0.0.0:8088 + enabled: true +``` + ## Using with Kubernetes In Kubernetes, you can configure health checks by setting `readinessProbe` and `livenessProbe` on the `containers` object of the resource definition: ```yaml diff --git a/docs/source/configuration/metrics.mdx b/docs/source/configuration/metrics.mdx index 3d6517633f..80b466d9d9 100644 --- a/docs/source/configuration/metrics.mdx +++ b/docs/source/configuration/metrics.mdx @@ -24,6 +24,23 @@ telemetry: path: /metrics ``` +## Using in a containers environment + +The prometheus endpoint listens to 127.0.0.1 by default, which won't allow connections issued from a network. +While this is a safe default, *other containers won't be able to access the prometheus endpoint*, which will disable metric scraping. + +You can change this by setting: +```yaml title="router.yaml" +telemetry: + metrics: + prometheus: + # By setting this endpoint you enable other containers and pods to access the prometheus endpoint + enabled: true + listen: 0.0.0.0:9090 + path: /metrics +``` + + Assuming you're running locally: 1. Run a query against the router. From 728552f0ca934e0bc6803b803859d5c956a832d9 Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Fri, 2 Dec 2022 12:35:38 +0000 Subject: [PATCH 38/45] Added Apollo Tracing support for `@defer`. (#2190) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit You can now view traces in Apollo Studio as normal. Also improved testing and fixed missing variables in Apollo tracing. 
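For illustration, the deferred queries exercised by the new tests (see the curl commands quoted in the test comments further down) can be reproduced with a request along these lines. This is only a sketch: it assumes the `reqwest` crate (with its `json` feature) and `tokio`, neither of which this change adds as a dependency, and it targets the hypothetical local endpoint used in those comments. The `Accept` header opts the client in to the multipart incremental delivery that `@defer` relies on.

```rust
// Sketch only: assumes reqwest (json feature) + tokio, and a router/supergraph
// listening on http://localhost:4000/ as in the test comments below.
use reqwest::Client;
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let body = json!({
        "query": "query($if: Boolean!) { topProducts { name ... @defer(if: $if) { reviews { author { name } } } } }",
        "variables": { "if": true }
    });
    let response = Client::new()
        .post("http://localhost:4000/")
        // Opt in to incremental (multipart) delivery so @defer is honored.
        .header("accept", "multipart/mixed; deferSpec=20220824, application/json")
        .header("content-type", "application/json")
        .json(&body)
        .send()
        .await?;
    // Each multipart chunk carries one incremental GraphQL payload.
    println!("status: {}", response.status());
    println!("{}", response.text().await?);
    Ok(())
}
```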
Fixes #1600 #2186 Co-authored-by: bryn --- NEXT_CHANGELOG.md | 18 + apollo-router/Cargo.toml | 2 +- .../src/plugins/telemetry/apollo_exporter.rs | 14 +- apollo-router/src/plugins/telemetry/mod.rs | 39 +- .../src/plugins/telemetry/tracing/apollo.rs | 3 +- .../telemetry/tracing/apollo_telemetry.rs | 790 +++++++-- ...pollo_telemetry__test__condition_else.snap | 593 +++++++ ..._apollo_telemetry__test__condition_if.snap | 606 +++++++ ...ing__apollo_telemetry__test__trace_id.snap | 609 +++++++ .../testdata/condition_else_spandata.yaml | 1252 ++++++++++++++ .../testdata/condition_if_spandata.yaml | 1441 +++++++++++++++++ apollo-router/src/query_planner/execution.rs | 268 +-- apollo-router/src/query_planner/mod.rs | 6 + apollo-router/src/query_planner/plan.rs | 11 - ...ation_tests__traced_basic_composition.snap | 68 +- ...tegration_tests__traced_basic_request.snap | 19 +- xtask/src/commands/compliance.rs | 50 +- 17 files changed, 5370 insertions(+), 419 deletions(-) create mode 100644 apollo-router/src/plugins/telemetry/tracing/snapshots/apollo_router__plugins__telemetry__tracing__apollo_telemetry__test__condition_else.snap create mode 100644 apollo-router/src/plugins/telemetry/tracing/snapshots/apollo_router__plugins__telemetry__tracing__apollo_telemetry__test__condition_if.snap create mode 100644 apollo-router/src/plugins/telemetry/tracing/snapshots/apollo_router__plugins__telemetry__tracing__apollo_telemetry__test__trace_id.snap create mode 100644 apollo-router/src/plugins/telemetry/tracing/testdata/condition_else_spandata.yaml create mode 100644 apollo-router/src/plugins/telemetry/tracing/testdata/condition_if_spandata.yaml diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 88956f16cf..19abfa8f58 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -238,6 +238,12 @@ supergraph: By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2155 +### `@defer` Apollo tracing support ([Issue #1600](https://github.com/apollographql/router/issues/1600)) + +Added Apollo tracing support for queries that use `@defer`. You can now view traces in Apollo Studio as normal. + +By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2190 + ## 🐛 Fixes ### Fix panic when dev mode enabled with empty config file ([Issue #2182](https://github.com/apollographql/router/issues/2182)) @@ -246,6 +252,18 @@ If you're running the Router with dev mode with an empty config file, it will no By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2165 +### Fix missing apollo tracing variables ([Issue #2186](https://github.com/apollographql/router/issues/2186)) + +Send variable values had no effect. This is now fixed. +```yaml +telemetry: + apollo: + send_variable_values: all +``` + +By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2190 + + ### fix build_docker_image.sh script when using default repo ([PR #2163](https://github.com/apollographql/router/pull/2163)) Adding the `-r` flag recently broke the existing functionality to build from the default repo using `-b`. This fixes that. 
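As background for the empty-configuration panic fix listed above: the corresponding change in `apollo-router/src/configuration/mod.rs` (patch 36 in this series) replaces a hard `expect` on the `telemetry` plugin entry with `entry(..).or_insert_with(..)`, so a missing entry is created rather than panicking when the config file is empty. A minimal sketch of that defaulting pattern on a `serde_json::Map` follows; the function and variable names are illustrative and are not the Router's actual types.

```rust
// Minimal sketch of the entry(..).or_insert_with(..) defaulting pattern used by the
// fix: create the "telemetry" plugin configuration on demand instead of expecting it
// to already exist (which panicked on an empty config file). Illustrative names only.
use serde_json::{json, Map, Value};

fn ensure_telemetry_entry(plugins: &mut Map<String, Value>) -> &mut Value {
    plugins.entry("telemetry").or_insert_with(|| json!({}))
}

fn main() {
    // An empty config file leaves the plugin map empty; the entry is created on demand.
    let mut plugins: Map<String, Value> = Map::new();
    ensure_telemetry_entry(&mut plugins)
        .as_object_mut()
        .expect("configuration for telemetry must be an object")
        .insert("tracing".to_string(), json!({}));
    assert_eq!(plugins["telemetry"], json!({ "tracing": {} }));
}
```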
diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 680e383837..4b4fd1d7de 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -202,7 +202,7 @@ uname = "0.1.1" uname = "0.1.1" [dev-dependencies] -insta = { version = "1.21.2", features = ["json", "redactions"] } +insta = { version = "1.21.2", features = ["json", "redactions", "yaml"] } introspector-gadget = "0.1.0" maplit = "1.0.2" memchr = { version = "2.5.0", default-features = false } diff --git a/apollo-router/src/plugins/telemetry/apollo_exporter.rs b/apollo-router/src/plugins/telemetry/apollo_exporter.rs index 8392ae2781..2028b667c1 100644 --- a/apollo-router/src/plugins/telemetry/apollo_exporter.rs +++ b/apollo-router/src/plugins/telemetry/apollo_exporter.rs @@ -1,4 +1,8 @@ //! Configuration for apollo telemetry exporter. +#[cfg(test)] +use std::sync::Arc; +#[cfg(test)] +use std::sync::Mutex; // This entire file is license key functionality use std::time::Duration; @@ -31,20 +35,26 @@ pub(crate) const POOL_TIMEOUT: Duration = Duration::from_secs(5); pub(crate) enum Sender { Noop, Spaceport(mpsc::Sender), + #[cfg(test)] + InMemory(Arc>>), } impl Sender { - pub(crate) fn send(&self, metrics: SingleReport) { + pub(crate) fn send(&self, report: SingleReport) { match &self { Sender::Noop => {} Sender::Spaceport(channel) => { - if let Err(err) = channel.to_owned().try_send(metrics) { + if let Err(err) = channel.to_owned().try_send(report) { tracing::warn!( "could not send metrics to spaceport, metric will be dropped: {}", err ); } } + #[cfg(test)] + Sender::InMemory(storage) => { + storage.lock().expect("mutex poisoned").push(report); + } } } } diff --git a/apollo-router/src/plugins/telemetry/mod.rs b/apollo-router/src/plugins/telemetry/mod.rs index 33295f8d1c..7aae5c34e9 100644 --- a/apollo-router/src/plugins/telemetry/mod.rs +++ b/apollo-router/src/plugins/telemetry/mod.rs @@ -122,7 +122,6 @@ const CLIENT_VERSION: &str = "apollo_telemetry::client_version"; const ATTRIBUTES: &str = "apollo_telemetry::metrics_attributes"; const SUBGRAPH_ATTRIBUTES: &str = "apollo_telemetry::subgraph_metrics_attributes"; pub(crate) const STUDIO_EXCLUDE: &str = "apollo_telemetry::studio::exclude"; -pub(crate) const FTV1_DO_NOT_SAMPLE: &str = "apollo_telemetry::studio::ftv1_do_not_sample"; pub(crate) const LOGGING_DISPLAY_HEADERS: &str = "apollo_telemetry::logging::display_headers"; pub(crate) const LOGGING_DISPLAY_BODY: &str = "apollo_telemetry::logging::display_body"; const DEFAULT_SERVICE_NAME: &str = "apollo-router"; @@ -280,31 +279,9 @@ impl Plugin for Telemetry { fn execution_service(&self, service: execution::BoxService) -> execution::BoxService { ServiceBuilder::new() - .instrument(move |req: &ExecutionRequest| { - // disable ftv1 sampling for deferred queries - let do_not_sample_reason = if req.query_plan.root.contains_condition_or_defer() { - req.context.insert(FTV1_DO_NOT_SAMPLE, true).unwrap(); - "query is deferred" - } else { - "" - }; - let query = req - .supergraph_request - .body() - .query - .clone() - .unwrap_or_default(); - let operation_name = req - .supergraph_request - .body() - .operation_name - .clone() - .unwrap_or_default(); + .instrument(move |_req: &ExecutionRequest| { info_span!("execution", - graphql.document = query.as_str(), - graphql.operation.name = operation_name.as_str(), "otel.kind" = %SpanKind::Internal, - ftv1.do_not_sample_reason = do_not_sample_reason ) }) .service(service) @@ -707,7 +684,7 @@ impl Telemetry { apollo_private.http.request_headers = field::Empty ); - if 
is_span_sampled(&request.context) { + if is_span_sampled() { span.record( "apollo_private.graphql.variables", &Self::filter_variables_values( @@ -792,7 +769,7 @@ impl Telemetry { } }) .fold(BTreeMap::new(), |mut acc, (name, value)| { - acc.entry(name).or_insert_with(Vec::new).push(value); + acc.insert(name, value); acc }); @@ -1185,7 +1162,7 @@ impl Telemetry { has_errors: bool, duration: Duration, ) { - if is_span_sampled(context) { + if is_span_sampled() { ::tracing::trace!("span is sampled then skip the apollo metrics"); return; } @@ -1294,12 +1271,8 @@ fn handle_error>(err: T) { } #[inline] -pub(crate) fn is_span_sampled(context: &Context) -> bool { +pub(crate) fn is_span_sampled() -> bool { Span::current().context().span().span_context().is_sampled() - && !context - .get(FTV1_DO_NOT_SAMPLE) - .unwrap_or_default() - .unwrap_or(false) } register_plugin!("apollo", "telemetry", Telemetry); @@ -1315,7 +1288,7 @@ enum ApolloFtv1Handler { impl ApolloFtv1Handler { fn request_ftv1(&self, mut req: SubgraphRequest) -> SubgraphRequest { if let ApolloFtv1Handler::Enabled = self { - if is_span_sampled(&req.context) { + if is_span_sampled() { req.subgraph_request.headers_mut().insert( "apollo-federation-include-trace", HeaderValue::from_static("ftv1"), diff --git a/apollo-router/src/plugins/telemetry/tracing/apollo.rs b/apollo-router/src/plugins/telemetry/tracing/apollo.rs index 969c1a2247..46ddbb785a 100644 --- a/apollo-router/src/plugins/telemetry/tracing/apollo.rs +++ b/apollo-router/src/plugins/telemetry/tracing/apollo.rs @@ -11,7 +11,7 @@ use crate::plugins::telemetry::tracing::TracingConfigurator; use crate::spaceport::Trace; impl TracingConfigurator for Config { - fn apply(&self, builder: Builder, trace_config: &config::Trace) -> Result { + fn apply(&self, builder: Builder, _trace_config: &config::Trace) -> Result { tracing::debug!("configuring Apollo tracing"); Ok(match self { Config { @@ -28,7 +28,6 @@ impl TracingConfigurator for Config { let exporter = apollo_telemetry::Exporter::builder() .expose_trace_id_config(expose_trace_id.clone()) - .trace_config(trace_config.clone()) .endpoint(endpoint.clone()) .apollo_key(key) .apollo_graph_ref(reference) diff --git a/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs b/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs index c8cddba186..9ed26f161a 100644 --- a/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs +++ b/apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs @@ -1,10 +1,10 @@ -use std::borrow::Cow; use std::collections::HashMap; use std::io::Cursor; use std::time::SystemTimeError; use async_trait::async_trait; use derivative::Derivative; +use itertools::Itertools; use lru::LruCache; use opentelemetry::sdk::export::trace::ExportResult; use opentelemetry::sdk::export::trace::SpanData; @@ -13,6 +13,7 @@ use opentelemetry::trace::SpanId; use opentelemetry::Key; use opentelemetry::Value; use opentelemetry_semantic_conventions::trace::HTTP_METHOD; +use serde::de::DeserializeOwned; use thiserror::Error; use url::Url; @@ -21,17 +22,29 @@ use crate::plugins::telemetry::apollo::SingleReport; use crate::plugins::telemetry::apollo_exporter::ApolloExporter; use crate::plugins::telemetry::apollo_exporter::Sender; use crate::plugins::telemetry::config; +use crate::plugins::telemetry::config::ExposeTraceId; use crate::plugins::telemetry::config::Sampler; use crate::plugins::telemetry::config::SamplerOption; use crate::plugins::telemetry::tracing::apollo::TracesReport; use 
crate::plugins::telemetry::BoxError; use crate::plugins::telemetry::SUBGRAPH_SPAN_NAME; use crate::plugins::telemetry::SUPERGRAPH_SPAN_NAME; +use crate::query_planner::CONDITION_ELSE_SPAN_NAME; +use crate::query_planner::CONDITION_IF_SPAN_NAME; +use crate::query_planner::CONDITION_SPAN_NAME; +use crate::query_planner::DEFER_DEFERRED_SPAN_NAME; +use crate::query_planner::DEFER_PRIMARY_SPAN_NAME; +use crate::query_planner::DEFER_SPAN_NAME; use crate::query_planner::FETCH_SPAN_NAME; use crate::query_planner::FLATTEN_SPAN_NAME; use crate::query_planner::PARALLEL_SPAN_NAME; use crate::query_planner::SEQUENCE_SPAN_NAME; use crate::spaceport::trace::http::Values; +use crate::spaceport::trace::query_plan_node::ConditionNode; +use crate::spaceport::trace::query_plan_node::DeferNode; +use crate::spaceport::trace::query_plan_node::DeferNodePrimary; +use crate::spaceport::trace::query_plan_node::DeferredNode; +use crate::spaceport::trace::query_plan_node::DeferredNodeDepends; use crate::spaceport::trace::query_plan_node::FetchNode; use crate::spaceport::trace::query_plan_node::FlattenNode; use crate::spaceport::trace::query_plan_node::ParallelNode; @@ -52,11 +65,14 @@ const APOLLO_PRIVATE_HTTP_REQUEST_HEADERS: Key = pub(crate) const APOLLO_PRIVATE_OPERATION_SIGNATURE: Key = Key::from_static_str("apollo_private.operation_signature"); const APOLLO_PRIVATE_FTV1: Key = Key::from_static_str("apollo_private.ftv1"); -const APOLLO_PRIVATE_PATH: Key = Key::from_static_str("apollo_private.path"); -const FTV1_DO_NOT_SAMPLE_REASON: Key = Key::from_static_str("ftv1.do_not_sample_reason"); +const PATH: Key = Key::from_static_str("graphql.path"); const SUBGRAPH_NAME: Key = Key::from_static_str("apollo.subgraph.name"); const CLIENT_NAME: Key = Key::from_static_str("client.name"); const CLIENT_VERSION: Key = Key::from_static_str("client.version"); +const DEPENDS: Key = Key::from_static_str("graphql.depends"); +const LABEL: Key = Key::from_static_str("graphql.label"); +const CONDITION: Key = Key::from_static_str("graphql.condition"); +const OPERATION_NAME: Key = Key::from_static_str("graphql.operation.name"); pub(crate) const DEFAULT_TRACE_ID_HEADER_NAME: &str = "apollo-trace-id"; #[derive(Error, Debug)] @@ -67,17 +83,14 @@ pub(crate) enum Error { #[error("subgraph trace payload was not base64")] Base64Decode(#[from] base64::DecodeError), - #[error("ftv1 span attribute should have been a string")] - Ftv1SpanAttribute, + #[error("trace parsing failed")] + TraceParsingFailed, #[error("there were multiple tracing errors")] MultipleErrors(Vec), #[error("duration could not be calculated")] SystemTime(#[from] SystemTimeError), - - #[error("this trace should not be sampled")] - DoNotSample(Cow<'static, str>), } /// A [`SpanExporter`] that writes to [`Reporter`]. 
@@ -88,10 +101,7 @@ pub(crate) enum Error { #[derivative(Debug)] pub(crate) struct Exporter { expose_trace_id_config: config::ExposeTraceId, - trace_config: config::Trace, spans_by_parent_id: LruCache>, - endpoint: Url, - schema_id: String, #[derivative(Debug = "ignore")] apollo_sender: Sender, field_execution_weight: f64, @@ -104,9 +114,15 @@ enum TreeData { client_name: Option, client_version: Option, operation_signature: String, + operation_name: String, + variables_json: HashMap, }, - QueryPlan(QueryPlanNode), - Trace(Result>, Error>), + QueryPlanNode(QueryPlanNode), + DeferPrimary(DeferNodePrimary), + DeferDeferred(DeferredNode), + ConditionIf(Option), + ConditionElse(Option), + Trace(Option, Error>>), } #[buildstructor::buildstructor] @@ -114,7 +130,6 @@ impl Exporter { #[builder] pub(crate) fn new( expose_trace_id_config: config::ExposeTraceId, - trace_config: config::Trace, endpoint: Url, apollo_key: String, apollo_graph_ref: String, @@ -128,9 +143,6 @@ impl Exporter { Ok(Self { expose_trace_id_config, spans_by_parent_id: LruCache::new(buffer_size), - trace_config, - endpoint, - schema_id, apollo_sender: apollo_exporter.provider(), field_execution_weight: match field_execution_sampler { Some(SamplerOption::Always(Sampler::AlwaysOn)) => 1.0, @@ -146,23 +158,7 @@ impl Exporter { span: &SpanData, child_nodes: Vec, ) -> Result, Error> { - let variables = span - .attributes - .get(&APOLLO_PRIVATE_GRAPHQL_VARIABLES) - .map(|data| data.as_str()) - .unwrap_or_default(); - let variables_json = if variables != "{}" { - serde_json::from_str(&variables).unwrap_or_default() - } else { - HashMap::new() - }; - - let details = Details { - variables_json, - ..Default::default() - }; - - let http = self.extract_http_data(span); + let http = extract_http_data(span, &self.expose_trace_id_config); let mut root_trace = crate::spaceport::Trace { start_time: Some(span.start_time.into()), @@ -170,18 +166,18 @@ impl Exporter { duration_ns: span .attributes .get(&APOLLO_PRIVATE_DURATION_NS) - .and_then(Self::extract_i64) + .and_then(extract_i64) .map(|e| e as u64) .unwrap_or_default(), root: None, - details: Some(details), + details: None, http: Some(http), ..Default::default() }; for node in child_nodes { match node { - TreeData::QueryPlan(query_plan) => { + TreeData::QueryPlanNode(query_plan) => { root_trace.query_plan = Some(Box::new(query_plan)) } TreeData::Supergraph { @@ -189,6 +185,8 @@ impl Exporter { client_name, client_version, operation_signature, + operation_name, + variables_json, } => { root_trace .http @@ -198,8 +196,11 @@ impl Exporter { root_trace.client_name = client_name.unwrap_or_default(); root_trace.client_version = client_version.unwrap_or_default(); root_trace.field_execution_weight = self.field_execution_weight; - // This will be moved out later root_trace.signature = operation_signature; + root_trace.details = Some(Details { + variables_json, + operation_name, + }); } _ => panic!("should never have had other node types"), } @@ -243,43 +244,26 @@ impl Exporter { if !errors.is_empty() { return Err(Error::MultipleErrors(errors)); } - if let Some(Value::String(reason)) = span.attributes.get(&FTV1_DO_NOT_SAMPLE_REASON) { - if !reason.is_empty() { - return Err(Error::DoNotSample(reason.clone())); - } - } Ok(match span.name.as_ref() { - PARALLEL_SPAN_NAME => vec![TreeData::QueryPlan(QueryPlanNode { + PARALLEL_SPAN_NAME => vec![TreeData::QueryPlanNode(QueryPlanNode { node: Some(crate::spaceport::trace::query_plan_node::Node::Parallel( ParallelNode { - nodes: child_nodes - .into_iter() - 
.filter_map(|child| match child { - TreeData::QueryPlan(node) => Some(node), - _ => None, - }) - .collect(), + nodes: child_nodes.remove_query_plan_nodes(), }, )), })], - SEQUENCE_SPAN_NAME => vec![TreeData::QueryPlan(QueryPlanNode { + SEQUENCE_SPAN_NAME => vec![TreeData::QueryPlanNode(QueryPlanNode { node: Some(crate::spaceport::trace::query_plan_node::Node::Sequence( SequenceNode { - nodes: child_nodes - .into_iter() - .filter_map(|child| match child { - TreeData::QueryPlan(node) => Some(node), - _ => None, - }) - .collect(), + nodes: child_nodes.remove_query_plan_nodes(), }, )), })], FETCH_SPAN_NAME => { let (trace_parsing_failed, trace) = match child_nodes.pop() { - Some(TreeData::Trace(Ok(trace))) => (false, trace), - Some(TreeData::Trace(Err(_err))) => (true, None), + Some(TreeData::Trace(Some(Ok(trace)))) => (false, Some(trace)), + Some(TreeData::Trace(Some(Err(_err)))) => (true, None), _ => (false, None), }; let service_name = (span @@ -289,7 +273,7 @@ impl Exporter { .unwrap_or_else(|| Value::String("unknown service".into())) .as_str()) .to_string(); - vec![TreeData::QueryPlan(QueryPlanNode { + vec![TreeData::QueryPlanNode(QueryPlanNode { node: Some(crate::spaceport::trace::query_plan_node::Node::Fetch( Box::new(FetchNode { service_name, @@ -298,7 +282,7 @@ impl Exporter { sent_time_offset: span .attributes .get(&APOLLO_PRIVATE_SENT_TIME_OFFSET) - .and_then(Self::extract_i64) + .and_then(extract_i64) .map(|f| f as u64) .unwrap_or_default(), sent_time: Some(span.start_time.into()), @@ -308,52 +292,49 @@ impl Exporter { })] } FLATTEN_SPAN_NAME => { - vec![TreeData::QueryPlan(QueryPlanNode { + vec![TreeData::QueryPlanNode(QueryPlanNode { node: Some(crate::spaceport::trace::query_plan_node::Node::Flatten( Box::new(FlattenNode { response_path: span .attributes - .get(&APOLLO_PRIVATE_PATH) - .and_then(Self::extract_string) - .map(|v| { - v.split('/').filter(|v|!v.is_empty() && *v != "@").map(|v| { - if let Ok(index) = v.parse::() { - ResponsePathElement { id: Some(crate::spaceport::trace::query_plan_node::response_path_element::Id::Index(index))} - } else { - ResponsePathElement { id: Some(crate::spaceport::trace::query_plan_node::response_path_element::Id::FieldName(v.to_string())) } - } - }).collect() - }).unwrap_or_default(), - node: child_nodes - .into_iter() - .filter_map(|child| match child { - TreeData::QueryPlan(node) => Some(Box::new(node)), - _ => None, - }) - .next(), + .get(&PATH) + .map(extract_path) + .unwrap_or_default(), + node: child_nodes.remove_first_query_plan_node().map(Box::new), }), )), })] } SUBGRAPH_SPAN_NAME => { - vec![TreeData::Trace(self.find_ftv1_trace(span))] + vec![TreeData::Trace( + span.attributes + .get(&APOLLO_PRIVATE_FTV1) + .and_then(extract_ftv1_trace), + )] } SUPERGRAPH_SPAN_NAME => { //Currently some data is in the supergraph span as we don't have the a request hook in plugin. 
child_nodes.push(TreeData::Supergraph { - http: self.extract_http_data(span), - client_name: span - .attributes - .get(&CLIENT_NAME) - .and_then(Self::extract_string), + http: extract_http_data(span, &self.expose_trace_id_config), + client_name: span.attributes.get(&CLIENT_NAME).and_then(extract_string), client_version: span .attributes .get(&CLIENT_VERSION) - .and_then(Self::extract_string), + .and_then(extract_string), operation_signature: span .attributes .get(&APOLLO_PRIVATE_OPERATION_SIGNATURE) - .and_then(Self::extract_string) + .and_then(extract_string) + .unwrap_or_default(), + operation_name: span + .attributes + .get(&OPERATION_NAME) + .and_then(extract_string) + .unwrap_or_default(), + variables_json: span + .attributes + .get(&APOLLO_PRIVATE_GRAPHQL_VARIABLES) + .and_then(extract_json) .unwrap_or_default(), }); child_nodes @@ -363,98 +344,181 @@ impl Exporter { self.extract_root_trace(span, child_nodes), )] } + DEFER_SPAN_NAME => { + vec![TreeData::QueryPlanNode(QueryPlanNode { + node: Some(crate::spaceport::trace::query_plan_node::Node::Defer( + Box::new(DeferNode { + primary: child_nodes.remove_first_defer_primary_node().map(Box::new), + deferred: child_nodes.remove_defer_deferred_nodes(), + }), + )), + })] + } + DEFER_PRIMARY_SPAN_NAME => { + vec![TreeData::DeferPrimary(DeferNodePrimary { + node: child_nodes.remove_first_query_plan_node().map(Box::new), + })] + } + DEFER_DEFERRED_SPAN_NAME => { + vec![TreeData::DeferDeferred(DeferredNode { + node: child_nodes.remove_first_query_plan_node(), + path: span + .attributes + .get(&PATH) + .map(extract_path) + .unwrap_or_default(), + // In theory we don't have to do the transformation here, but it is safer to do so. + depends: span + .attributes + .get(&DEPENDS) + .and_then(extract_json::>) + .unwrap_or_default() + .iter() + .map(|d| DeferredNodeDepends { + id: d.id.clone(), + defer_label: d.defer_label.clone().unwrap_or_default(), + }) + .collect(), + label: span + .attributes + .get(&LABEL) + .and_then(extract_string) + .unwrap_or_default(), + })] + } + + CONDITION_SPAN_NAME => { + vec![TreeData::QueryPlanNode(QueryPlanNode { + node: Some(crate::spaceport::trace::query_plan_node::Node::Condition( + Box::new(ConditionNode { + condition: span + .attributes + .get(&CONDITION) + .and_then(extract_string) + .unwrap_or_default(), + if_clause: child_nodes.remove_first_condition_if_node().map(Box::new), + else_clause: child_nodes + .remove_first_condition_else_node() + .map(Box::new), + }), + )), + })] + } + CONDITION_IF_SPAN_NAME => { + vec![TreeData::ConditionIf( + child_nodes.remove_first_query_plan_node(), + )] + } + CONDITION_ELSE_SPAN_NAME => { + vec![TreeData::ConditionElse( + child_nodes.remove_first_query_plan_node(), + )] + } _ => child_nodes, }) } +} - fn extract_string(v: &Value) -> Option { - if let Value::String(v) = v { - Some(v.to_string()) - } else { - None - } - } +fn extract_json(v: &Value) -> Option { + extract_string(v) + .map(|v| serde_json::from_str(&v)) + .transpose() + .unwrap_or(None) +} - fn extract_i64(v: &Value) -> Option { - if let Value::I64(v) = v { - Some(*v) - } else { - None - } +fn extract_string(v: &Value) -> Option { + if let Value::String(v) = v { + Some(v.to_string()) + } else { + None } +} - fn find_ftv1_trace( - &mut self, - span: &SpanData, - ) -> Result>, Error> { - span.attributes - .get(&APOLLO_PRIVATE_FTV1) - .map(|data| { - if let Value::String(data) = data { - Ok(Box::new(crate::spaceport::Trace::decode(Cursor::new( - base64::decode(data.to_string())?, - ))?)) +fn extract_path(v: &Value) 
-> Vec { + extract_string(v) + .map(|v| { + v.split('/').filter(|v|!v.is_empty() && *v != "@").map(|v| { + if let Ok(index) = v.parse::() { + ResponsePathElement { id: Some(crate::spaceport::trace::query_plan_node::response_path_element::Id::Index(index))} } else { - Err(Error::Ftv1SpanAttribute) + ResponsePathElement { id: Some(crate::spaceport::trace::query_plan_node::response_path_element::Id::FieldName(v.to_string())) } } - }) - .transpose() + }).collect() + }).unwrap_or_default() +} + +fn extract_i64(v: &Value) -> Option { + if let Value::I64(v) = v { + Some(*v) + } else { + None } +} - fn extract_http_data(&self, span: &SpanData) -> Http { - let method = match span - .attributes - .get(&HTTP_METHOD) - .map(|data| data.as_str()) - .unwrap_or_default() - .as_ref() - { - "OPTIONS" => crate::spaceport::trace::http::Method::Options, - "GET" => crate::spaceport::trace::http::Method::Get, - "HEAD" => crate::spaceport::trace::http::Method::Head, - "POST" => crate::spaceport::trace::http::Method::Post, - "PUT" => crate::spaceport::trace::http::Method::Put, - "DELETE" => crate::spaceport::trace::http::Method::Delete, - "TRACE" => crate::spaceport::trace::http::Method::Trace, - "CONNECT" => crate::spaceport::trace::http::Method::Connect, - "PATCH" => crate::spaceport::trace::http::Method::Patch, - _ => crate::spaceport::trace::http::Method::Unknown, - }; - let headers = span - .attributes - .get(&APOLLO_PRIVATE_HTTP_REQUEST_HEADERS) - .map(|data| data.as_str()) - .unwrap_or_default(); - let request_headers = serde_json::from_str::>>(&headers) - .unwrap_or_default() - .into_iter() - .map(|(header_name, value)| (header_name.to_lowercase(), Values { value })) - .collect(); - // For now, only trace_id - let response_headers = if self.expose_trace_id_config.enabled { - let mut res = HashMap::with_capacity(1); - res.insert( - self.expose_trace_id_config - .header_name - .as_ref() - .map(|h| h.to_string()) - .unwrap_or_else(|| DEFAULT_TRACE_ID_HEADER_NAME.to_string()), - Values { - value: vec![span.span_context.trace_id().to_string()], - }, - ); +fn extract_ftv1_trace(v: &Value) -> Option, Error>> { + if let Some(v) = extract_string(v) { + if let Ok(v) = base64::decode(v) { + if let Ok(t) = crate::spaceport::Trace::decode(Cursor::new(v)) { + return Some(Ok(Box::new(t))); + } + } - res - } else { - HashMap::new() - }; + return Some(Err(Error::TraceParsingFailed)); + } + None +} - Http { - method: method.into(), - request_headers, - response_headers, - status_code: 0, - } +fn extract_http_data(span: &SpanData, expose_trace_id_config: &ExposeTraceId) -> Http { + let method = match span + .attributes + .get(&HTTP_METHOD) + .map(|data| data.as_str()) + .unwrap_or_default() + .as_ref() + { + "OPTIONS" => crate::spaceport::trace::http::Method::Options, + "GET" => crate::spaceport::trace::http::Method::Get, + "HEAD" => crate::spaceport::trace::http::Method::Head, + "POST" => crate::spaceport::trace::http::Method::Post, + "PUT" => crate::spaceport::trace::http::Method::Put, + "DELETE" => crate::spaceport::trace::http::Method::Delete, + "TRACE" => crate::spaceport::trace::http::Method::Trace, + "CONNECT" => crate::spaceport::trace::http::Method::Connect, + "PATCH" => crate::spaceport::trace::http::Method::Patch, + _ => crate::spaceport::trace::http::Method::Unknown, + }; + let request_headers = span + .attributes + .get(&APOLLO_PRIVATE_HTTP_REQUEST_HEADERS) + .and_then(extract_json::>>) + .unwrap_or_default() + .into_iter() + .map(|(header_name, value)| (header_name.to_lowercase(), Values { value })) + 
.collect(); + // For now, only trace_id + let response_headers = if expose_trace_id_config.enabled { + let mut res = HashMap::with_capacity(1); + res.insert( + expose_trace_id_config + .header_name + .as_ref() + .map(|h| h.to_string()) + .unwrap_or_else(|| DEFAULT_TRACE_ID_HEADER_NAME.to_string()), + Values { + value: vec![span.span_context.trace_id().to_string()], + }, + ); + + res + } else { + HashMap::new() + }; + + Http { + method: method.into(), + request_headers, + response_headers, + status_code: 0, } } @@ -470,6 +534,17 @@ impl SpanExporter for Exporter { let mut traces: Vec<(String, crate::spaceport::Trace)> = Vec::new(); for span in batch { if span.name == REQUEST_SPAN_NAME { + // Write spans for testing + // You can obtain new span data by uncommenting the following code and executing a query. + // In general this isn't something we'll want to do often, we are just verifying that the exporter constructs a correct report. + // let mut c = self + // .spans_by_parent_id + // .iter() + // .flat_map(|(_, s)| s.iter()) + // .collect::>(); + // c.push(&span); + // std::fs::write("spandata.yaml", serde_yaml::to_string(&c).unwrap()).unwrap(); + match self.extract_trace(span) { Ok(mut trace) => { let mut operation_signature = Default::default(); @@ -479,17 +554,10 @@ impl SpanExporter for Exporter { } } Err(Error::MultipleErrors(errors)) => { - if let Some(Error::DoNotSample(reason)) = errors.first() { - tracing::debug!( - "sampling is disabled on this trace: {}, skipping", - reason - ); - } else { - tracing::error!( - "failed to construct trace: {}, skipping", - Error::MultipleErrors(errors) - ); - } + tracing::error!( + "failed to construct trace: {}, skipping", + Error::MultipleErrors(errors) + ); } Err(error) => { tracing::error!("failed to construct trace: {}, skipping", error); @@ -514,3 +582,365 @@ impl SpanExporter for Exporter { return ExportResult::Ok(()); } } + +trait ChildNodes { + fn remove_first_query_plan_node(&mut self) -> Option; + fn remove_query_plan_nodes(&mut self) -> Vec; + fn remove_first_defer_primary_node(&mut self) -> Option; + fn remove_defer_deferred_nodes(&mut self) -> Vec; + fn remove_first_condition_if_node(&mut self) -> Option; + fn remove_first_condition_else_node(&mut self) -> Option; +} + +impl ChildNodes for Vec { + fn remove_first_query_plan_node(&mut self) -> Option { + if let Some((idx, _)) = self + .iter() + .find_position(|child| matches!(child, TreeData::QueryPlanNode(_))) + { + if let TreeData::QueryPlanNode(node) = self.remove(idx) { + return Some(node); + } + } + None + } + + fn remove_query_plan_nodes(&mut self) -> Vec { + let mut extracted = Vec::new(); + let mut retained = Vec::new(); + for treedata in self.drain(0..self.len()) { + if let TreeData::QueryPlanNode(node) = treedata { + extracted.push(node); + } else { + retained.push(treedata) + } + } + self.append(&mut retained); + extracted + } + + fn remove_first_defer_primary_node(&mut self) -> Option { + if let Some((idx, _)) = self + .iter() + .find_position(|child| matches!(child, TreeData::DeferPrimary(_))) + { + if let TreeData::DeferPrimary(node) = self.remove(idx) { + return Some(node); + } + } + None + } + + fn remove_defer_deferred_nodes(&mut self) -> Vec { + let mut extracted = Vec::new(); + let mut retained = Vec::new(); + for treedata in self.drain(0..self.len()) { + if let TreeData::DeferDeferred(node) = treedata { + extracted.push(node); + } else { + retained.push(treedata) + } + } + self.append(&mut retained); + extracted + } + + fn remove_first_condition_if_node(&mut self) 
-> Option { + if let Some((idx, _)) = self + .iter() + .find_position(|child| matches!(child, TreeData::ConditionIf(_))) + { + if let TreeData::ConditionIf(node) = self.remove(idx) { + return node; + } + } + None + } + + fn remove_first_condition_else_node(&mut self) -> Option { + if let Some((idx, _)) = self + .iter() + .find_position(|child| matches!(child, TreeData::ConditionElse(_))) + { + if let TreeData::ConditionElse(node) = self.remove(idx) { + return node; + } + } + None + } +} + +#[buildstructor::buildstructor] +#[cfg(test)] +impl Exporter { + #[builder] + pub(crate) fn test_new(expose_trace_id_config: Option) -> Self { + Exporter { + expose_trace_id_config: expose_trace_id_config.unwrap_or_default(), + spans_by_parent_id: LruCache::unbounded(), + apollo_sender: Sender::InMemory(Default::default()), + field_execution_weight: 1.0, + } + } +} + +#[cfg(test)] +mod test { + use std::borrow::Cow; + + use http::header::HeaderName; + use opentelemetry::sdk::export::trace::SpanExporter; + use opentelemetry::Value; + use prost::Message; + use serde_json::json; + + use crate::plugins::telemetry::apollo::SingleReport; + use crate::plugins::telemetry::apollo_exporter::Sender; + use crate::plugins::telemetry::config::ExposeTraceId; + use crate::plugins::telemetry::tracing::apollo_telemetry::extract_ftv1_trace; + use crate::plugins::telemetry::tracing::apollo_telemetry::extract_i64; + use crate::plugins::telemetry::tracing::apollo_telemetry::extract_json; + use crate::plugins::telemetry::tracing::apollo_telemetry::extract_path; + use crate::plugins::telemetry::tracing::apollo_telemetry::extract_string; + use crate::plugins::telemetry::tracing::apollo_telemetry::ChildNodes; + use crate::plugins::telemetry::tracing::apollo_telemetry::Exporter; + use crate::plugins::telemetry::tracing::apollo_telemetry::TreeData; + use crate::spaceport; + use crate::spaceport::trace::query_plan_node::response_path_element::Id; + use crate::spaceport::trace::query_plan_node::DeferNodePrimary; + use crate::spaceport::trace::query_plan_node::DeferredNode; + use crate::spaceport::trace::query_plan_node::ResponsePathElement; + use crate::spaceport::trace::QueryPlanNode; + + async fn report(mut exporter: Exporter, spandata: &str) -> SingleReport { + let spandata = serde_yaml::from_str(spandata).expect("test spans must be parsable"); + + exporter + .export(spandata) + .await + .expect("span export must succeed"); + assert!(matches!(exporter.apollo_sender, Sender::InMemory(_))); + if let Sender::InMemory(storage) = exporter.apollo_sender { + return storage + .lock() + .expect("lock poisoned") + .pop() + .expect("must have a report"); + } + panic!("cannot happen"); + } + + macro_rules! assert_report { + ($report: expr)=> { + insta::with_settings!({sort_maps => true}, { + insta::assert_yaml_snapshot!($report, { + ".**.seconds" => "[seconds]", + ".**.nanos" => "[nanos]", + ".**.duration_ns" => "[duration_ns]", + ".**.child[].start_time" => "[start_time]", + ".**.child[].end_time" => "[end_time]", + ".**.trace_id.value[]" => "[trace_id]", + ".**.sent_time_offset" => "[sent_time_offset]" + }); + }); + } + } + + #[tokio::test] + async fn test_condition_if() { + // The following curl request was used to generate this span data + // curl --request POST \ + // --header 'content-type: application/json' \ + // --header 'accept: multipart/mixed; deferSpec=20220824, application/json' \ + // --url http://localhost:4000/ \ + // --data '{"query":"query($if: Boolean!) {\n topProducts {\n name\n ... 
@defer(if: $if) {\n reviews {\n author {\n name\n }\n }\n reviews {\n author {\n name\n }\n }\n }\n }\n}","variables":{"if":true}}' + let spandata = include_str!("testdata/condition_if_spandata.yaml"); + let exporter = Exporter::test_builder().build(); + let report = report(exporter, spandata).await; + assert_report!(report); + } + + #[tokio::test] + async fn test_condition_else() { + // The following curl request was used to generate this span data + // curl --request POST \ + // --header 'content-type: application/json' \ + // --header 'accept: multipart/mixed; deferSpec=20220824, application/json' \ + // --url http://localhost:4000/ \ + // --data '{"query":"query($if: Boolean!) {\n topProducts {\n name\n ... @defer(if: $if) {\n reviews {\n author {\n name\n }\n }\n reviews {\n author {\n name\n }\n }\n }\n }\n}","variables":{"if":false}}' + let spandata = include_str!("testdata/condition_else_spandata.yaml"); + let exporter = Exporter::test_builder().build(); + let report = report(exporter, spandata).await; + assert_report!(report); + } + + #[tokio::test] + async fn test_trace_id() { + let spandata = include_str!("testdata/condition_if_spandata.yaml"); + let exporter = Exporter::test_builder() + .expose_trace_id_config(ExposeTraceId { + enabled: true, + header_name: Some(HeaderName::from_static("trace_id")), + }) + .build(); + let report = report(exporter, spandata).await; + assert_report!(report); + } + + fn elements(tree_data: Vec) -> Vec<&'static str> { + let mut elements = Vec::new(); + for t in tree_data { + match t { + TreeData::Request(_) => elements.push("request"), + TreeData::Supergraph { .. } => elements.push("supergraph"), + TreeData::QueryPlanNode(_) => elements.push("query_plan_node"), + TreeData::DeferPrimary(_) => elements.push("defer_primary"), + TreeData::DeferDeferred(_) => elements.push("defer_deferred"), + TreeData::ConditionIf(_) => elements.push("condition_if"), + TreeData::ConditionElse(_) => elements.push("condition_else"), + TreeData::Trace(_) => elements.push("trace"), + } + } + elements + } + + #[test] + fn remove_first_query_plan_node() { + let mut vec = vec![ + TreeData::Trace(None), + TreeData::QueryPlanNode(QueryPlanNode { node: None }), + TreeData::QueryPlanNode(QueryPlanNode { node: None }), + ]; + + assert!(vec.remove_first_query_plan_node().is_some()); + assert_eq!(elements(vec), ["trace", "query_plan_node"]); + } + + #[test] + fn remove_query_plan_nodes() { + let mut vec = vec![ + TreeData::Trace(None), + TreeData::QueryPlanNode(QueryPlanNode { node: None }), + TreeData::QueryPlanNode(QueryPlanNode { node: None }), + ]; + + assert_eq!(vec.remove_query_plan_nodes().len(), 2); + assert_eq!(elements(vec), ["trace"]); + } + + #[test] + fn remove_first_defer_primary_node() { + let mut vec = vec![ + TreeData::Trace(None), + TreeData::DeferPrimary(DeferNodePrimary { node: None }), + TreeData::DeferDeferred(DeferredNode { + depends: vec![], + label: "".to_string(), + path: Default::default(), + node: None, + }), + ]; + + assert!(vec.remove_first_defer_primary_node().is_some()); + assert_eq!(elements(vec), ["trace", "defer_deferred"]); + } + + #[test] + fn remove_defer_deferred_nodes() { + let mut vec = vec![ + TreeData::Trace(None), + TreeData::DeferPrimary(DeferNodePrimary { node: None }), + TreeData::DeferDeferred(DeferredNode { + depends: vec![], + label: "".to_string(), + path: Default::default(), + node: None, + }), + TreeData::DeferDeferred(DeferredNode { + depends: vec![], + label: "".to_string(), + path: Default::default(), + node: None, + }), + ]; + 
+ assert_eq!(vec.remove_defer_deferred_nodes().len(), 2); + assert_eq!(elements(vec), ["trace", "defer_primary"]); + } + + #[test] + fn test_remove_first_condition_if_node() { + let mut vec = vec![ + TreeData::Trace(None), + TreeData::ConditionIf(Some(QueryPlanNode { node: None })), + TreeData::ConditionElse(Some(QueryPlanNode { node: None })), + ]; + + assert!(vec.remove_first_condition_if_node().is_some()); + assert_eq!(elements(vec), ["trace", "condition_else"]); + } + + #[test] + fn test_remove_first_condition_else_node() { + let mut vec = vec![ + TreeData::Trace(None), + TreeData::ConditionIf(Some(QueryPlanNode { node: None })), + TreeData::ConditionElse(Some(QueryPlanNode { node: None })), + ]; + + assert!(vec.remove_first_condition_else_node().is_some()); + assert_eq!(elements(vec), ["trace", "condition_if"]); + } + + #[test] + fn test_extract_json() { + let val = json!({"hi": "there"}); + assert_eq!( + extract_json::(&Value::String(Cow::Owned(val.to_string()))), + Some(val) + ); + } + + #[test] + fn test_extract_string() { + assert_eq!( + extract_string(&Value::String(Cow::Owned("hi".to_string()))), + Some("hi".to_string()) + ); + } + + #[test] + fn test_extract_path() { + assert_eq!( + extract_path(&Value::String(Cow::Owned("/hi/3/there".to_string()))), + vec![ + ResponsePathElement { + id: Some(Id::FieldName("hi".to_string())), + }, + ResponsePathElement { + id: Some(Id::Index(3)), + }, + ResponsePathElement { + id: Some(Id::FieldName("there".to_string())), + } + ] + ); + } + + #[test] + fn test_extract_i64() { + assert_eq!(extract_i64(&Value::I64(35)), Some(35)); + } + + #[test] + fn test_extract_ftv1_trace() { + let trace = spaceport::Trace::default(); + let encoded = base64::encode(trace.encode_to_vec()); + assert_eq!( + *extract_ftv1_trace(&Value::String(Cow::Owned(encoded))) + .expect("there was a trace here") + .expect("the trace must be decoded"), + trace + ); + } +} diff --git a/apollo-router/src/plugins/telemetry/tracing/snapshots/apollo_router__plugins__telemetry__tracing__apollo_telemetry__test__condition_else.snap b/apollo-router/src/plugins/telemetry/tracing/snapshots/apollo_router__plugins__telemetry__tracing__apollo_telemetry__test__condition_else.snap new file mode 100644 index 0000000000..b7fc231720 --- /dev/null +++ b/apollo-router/src/plugins/telemetry/tracing/snapshots/apollo_router__plugins__telemetry__tracing__apollo_telemetry__test__condition_else.snap @@ -0,0 +1,593 @@ +--- +source: apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs +expression: report +--- +Traces: + traces: + - - "# -\nquery($if:Boolean!){topProducts{name...@defer(if:$if){reviews{author{name}}reviews{author{name}}}}}" + - start_time: + seconds: "[seconds]" + nanos: "[nanos]" + end_time: + seconds: "[seconds]" + nanos: "[nanos]" + duration_ns: "[duration_ns]" + root: ~ + is_incomplete: false + signature: "" + unexecuted_operation_body: "" + unexecuted_operation_name: "" + details: + variables_json: + if: "false" + operation_name: "" + client_name: "" + client_version: "" + http: + method: 4 + request_headers: + accept: + value: + - "" + content-length: + value: + - "" + content-type: + value: + - "" + host: + value: + - "" + user-agent: + value: + - "" + response_headers: {} + status_code: 0 + cache_policy: ~ + query_plan: + node: + Condition: + condition: if + if_clause: ~ + else_clause: + node: + Sequence: + nodes: + - node: + Fetch: + service_name: products + trace_parsing_failed: false + trace: + start_time: + seconds: "[seconds]" + nanos: "[nanos]" + end_time: + seconds: 
"[seconds]" + nanos: "[nanos]" + duration_ns: "[duration_ns]" + root: + original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: 0 + end_time: 0 + error: [] + child: + - original_field_name: "" + type: "[Product]" + parent_type: Query + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String! + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: upc + - original_field_name: "" + type: String + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + id: + Index: 0 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String! + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: upc + - original_field_name: "" + type: String + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + id: + Index: 1 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String! + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: upc + - original_field_name: "" + type: String + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + id: + Index: 2 + id: + ResponseName: topProducts + id: ~ + is_incomplete: false + signature: "" + unexecuted_operation_body: "" + unexecuted_operation_name: "" + details: ~ + client_name: "" + client_version: "" + http: ~ + cache_policy: ~ + query_plan: ~ + full_query_cache_hit: false + persisted_query_hit: false + persisted_query_register: false + registered_operation: false + forbidden_operation: false + field_execution_weight: 1 + sent_time_offset: "[sent_time_offset]" + sent_time: + seconds: "[seconds]" + nanos: "[nanos]" + received_time: + seconds: "[seconds]" + nanos: "[nanos]" + - node: + Flatten: + response_path: + - id: + FieldName: topProducts + node: + node: + Fetch: + service_name: reviews + trace_parsing_failed: false + trace: + start_time: + seconds: "[seconds]" + nanos: "[nanos]" + end_time: + seconds: "[seconds]" + nanos: "[nanos]" + duration_ns: "[duration_ns]" + root: + original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: 0 + end_time: 0 + error: [] + child: + - original_field_name: "" + type: "[_Entity]!" 
+ parent_type: Query + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "[Review]" + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: User + parent_type: Review + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: ID! + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: id + id: + ResponseName: author + id: + Index: 0 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: User + parent_type: Review + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: ID! + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: id + id: + ResponseName: author + id: + Index: 1 + id: + ResponseName: reviews + id: + Index: 0 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "[Review]" + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: User + parent_type: Review + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: ID! + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: id + id: + ResponseName: author + id: + Index: 0 + id: + ResponseName: reviews + id: + Index: 1 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "[Review]" + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: User + parent_type: Review + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: ID! 
+ parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: id + id: + ResponseName: author + id: + Index: 0 + id: + ResponseName: reviews + id: + Index: 2 + id: + ResponseName: _entities + id: ~ + is_incomplete: false + signature: "" + unexecuted_operation_body: "" + unexecuted_operation_name: "" + details: ~ + client_name: "" + client_version: "" + http: ~ + cache_policy: ~ + query_plan: ~ + full_query_cache_hit: false + persisted_query_hit: false + persisted_query_register: false + registered_operation: false + forbidden_operation: false + field_execution_weight: 1 + sent_time_offset: "[sent_time_offset]" + sent_time: + seconds: "[seconds]" + nanos: "[nanos]" + received_time: + seconds: "[seconds]" + nanos: "[nanos]" + - node: + Flatten: + response_path: + - id: + FieldName: topProducts + - id: + FieldName: reviews + - id: + FieldName: author + node: + node: + Fetch: + service_name: accounts + trace_parsing_failed: false + trace: + start_time: + seconds: "[seconds]" + nanos: "[nanos]" + end_time: + seconds: "[seconds]" + nanos: "[nanos]" + duration_ns: "[duration_ns]" + root: + original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: 0 + end_time: 0 + error: [] + child: + - original_field_name: "" + type: "[_Entity]!" + parent_type: Query + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + id: + Index: 0 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + id: + Index: 1 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + id: + Index: 2 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + id: + Index: 3 + id: + ResponseName: _entities + id: ~ + is_incomplete: false + signature: "" + unexecuted_operation_body: "" + unexecuted_operation_name: "" + details: ~ + client_name: "" + client_version: "" + http: ~ + cache_policy: ~ + query_plan: ~ + full_query_cache_hit: false + persisted_query_hit: false + persisted_query_register: false + registered_operation: false + forbidden_operation: false + field_execution_weight: 1 + sent_time_offset: "[sent_time_offset]" + sent_time: + seconds: "[seconds]" + nanos: "[nanos]" + received_time: + seconds: "[seconds]" + nanos: "[nanos]" + full_query_cache_hit: false + persisted_query_hit: false + persisted_query_register: false + 
registered_operation: false + forbidden_operation: false + field_execution_weight: 1 + diff --git a/apollo-router/src/plugins/telemetry/tracing/snapshots/apollo_router__plugins__telemetry__tracing__apollo_telemetry__test__condition_if.snap b/apollo-router/src/plugins/telemetry/tracing/snapshots/apollo_router__plugins__telemetry__tracing__apollo_telemetry__test__condition_if.snap new file mode 100644 index 0000000000..72ca9af214 --- /dev/null +++ b/apollo-router/src/plugins/telemetry/tracing/snapshots/apollo_router__plugins__telemetry__tracing__apollo_telemetry__test__condition_if.snap @@ -0,0 +1,606 @@ +--- +source: apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs +expression: report +--- +Traces: + traces: + - - "# -\nquery($if:Boolean!){topProducts{name...@defer(if:$if){reviews{author{name}}reviews{author{name}}}}}" + - start_time: + seconds: "[seconds]" + nanos: "[nanos]" + end_time: + seconds: "[seconds]" + nanos: "[nanos]" + duration_ns: "[duration_ns]" + root: ~ + is_incomplete: false + signature: "" + unexecuted_operation_body: "" + unexecuted_operation_name: "" + details: + variables_json: + if: "true" + operation_name: "" + client_name: "" + client_version: "" + http: + method: 4 + request_headers: + accept: + value: + - "" + content-length: + value: + - "" + content-type: + value: + - "" + host: + value: + - "" + user-agent: + value: + - "" + response_headers: {} + status_code: 0 + cache_policy: ~ + query_plan: + node: + Condition: + condition: if + if_clause: + node: + Defer: + primary: + node: + node: + Fetch: + service_name: products + trace_parsing_failed: false + trace: + start_time: + seconds: "[seconds]" + nanos: "[nanos]" + end_time: + seconds: "[seconds]" + nanos: "[nanos]" + duration_ns: "[duration_ns]" + root: + original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: 0 + end_time: 0 + error: [] + child: + - original_field_name: "" + type: "[Product]" + parent_type: Query + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + - original_field_name: "" + type: String! + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: upc + id: + Index: 0 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + - original_field_name: "" + type: String! + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: upc + id: + Index: 1 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + - original_field_name: "" + type: String! 
+ parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: upc + id: + Index: 2 + id: + ResponseName: topProducts + id: ~ + is_incomplete: false + signature: "" + unexecuted_operation_body: "" + unexecuted_operation_name: "" + details: ~ + client_name: "" + client_version: "" + http: ~ + cache_policy: ~ + query_plan: ~ + full_query_cache_hit: false + persisted_query_hit: false + persisted_query_register: false + registered_operation: false + forbidden_operation: false + field_execution_weight: 1 + sent_time_offset: "[sent_time_offset]" + sent_time: + seconds: "[seconds]" + nanos: "[nanos]" + received_time: + seconds: "[seconds]" + nanos: "[nanos]" + deferred: + - depends: + - id: "0" + defer_label: "" + label: "" + path: + - id: + FieldName: topProducts + node: + node: + Sequence: + nodes: + - node: + Flatten: + response_path: + - id: + FieldName: topProducts + node: + node: + Fetch: + service_name: reviews + trace_parsing_failed: false + trace: + start_time: + seconds: "[seconds]" + nanos: "[nanos]" + end_time: + seconds: "[seconds]" + nanos: "[nanos]" + duration_ns: "[duration_ns]" + root: + original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: 0 + end_time: 0 + error: [] + child: + - original_field_name: "" + type: "[_Entity]!" + parent_type: Query + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "[Review]" + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: User + parent_type: Review + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: ID! + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: id + id: + ResponseName: author + id: + Index: 0 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: User + parent_type: Review + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: ID! 
+ parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: id + id: + ResponseName: author + id: + Index: 1 + id: + ResponseName: reviews + id: + Index: 0 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "[Review]" + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: User + parent_type: Review + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: ID! + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: id + id: + ResponseName: author + id: + Index: 0 + id: + ResponseName: reviews + id: + Index: 1 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "[Review]" + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: User + parent_type: Review + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: ID! + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: id + id: + ResponseName: author + id: + Index: 0 + id: + ResponseName: reviews + id: + Index: 2 + id: + ResponseName: _entities + id: ~ + is_incomplete: false + signature: "" + unexecuted_operation_body: "" + unexecuted_operation_name: "" + details: ~ + client_name: "" + client_version: "" + http: ~ + cache_policy: ~ + query_plan: ~ + full_query_cache_hit: false + persisted_query_hit: false + persisted_query_register: false + registered_operation: false + forbidden_operation: false + field_execution_weight: 1 + sent_time_offset: "[sent_time_offset]" + sent_time: + seconds: "[seconds]" + nanos: "[nanos]" + received_time: + seconds: "[seconds]" + nanos: "[nanos]" + - node: + Flatten: + response_path: + - id: + FieldName: topProducts + - id: + FieldName: reviews + - id: + FieldName: author + node: + node: + Fetch: + service_name: accounts + trace_parsing_failed: false + trace: + start_time: + seconds: "[seconds]" + nanos: "[nanos]" + end_time: + seconds: "[seconds]" + nanos: "[nanos]" + duration_ns: "[duration_ns]" + root: + original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: 0 + end_time: 0 + error: [] + child: + - original_field_name: "" + type: "[_Entity]!" 
+ parent_type: Query + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + id: + Index: 0 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + id: + Index: 1 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + id: + Index: 2 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + id: + Index: 3 + id: + ResponseName: _entities + id: ~ + is_incomplete: false + signature: "" + unexecuted_operation_body: "" + unexecuted_operation_name: "" + details: ~ + client_name: "" + client_version: "" + http: ~ + cache_policy: ~ + query_plan: ~ + full_query_cache_hit: false + persisted_query_hit: false + persisted_query_register: false + registered_operation: false + forbidden_operation: false + field_execution_weight: 1 + sent_time_offset: "[sent_time_offset]" + sent_time: + seconds: "[seconds]" + nanos: "[nanos]" + received_time: + seconds: "[seconds]" + nanos: "[nanos]" + else_clause: ~ + full_query_cache_hit: false + persisted_query_hit: false + persisted_query_register: false + registered_operation: false + forbidden_operation: false + field_execution_weight: 1 + diff --git a/apollo-router/src/plugins/telemetry/tracing/snapshots/apollo_router__plugins__telemetry__tracing__apollo_telemetry__test__trace_id.snap b/apollo-router/src/plugins/telemetry/tracing/snapshots/apollo_router__plugins__telemetry__tracing__apollo_telemetry__test__trace_id.snap new file mode 100644 index 0000000000..706a60cea1 --- /dev/null +++ b/apollo-router/src/plugins/telemetry/tracing/snapshots/apollo_router__plugins__telemetry__tracing__apollo_telemetry__test__trace_id.snap @@ -0,0 +1,609 @@ +--- +source: apollo-router/src/plugins/telemetry/tracing/apollo_telemetry.rs +expression: report +--- +Traces: + traces: + - - "# -\nquery($if:Boolean!){topProducts{name...@defer(if:$if){reviews{author{name}}reviews{author{name}}}}}" + - start_time: + seconds: "[seconds]" + nanos: "[nanos]" + end_time: + seconds: "[seconds]" + nanos: "[nanos]" + duration_ns: "[duration_ns]" + root: ~ + is_incomplete: false + signature: "" + unexecuted_operation_body: "" + unexecuted_operation_name: "" + details: + variables_json: + if: "true" + operation_name: "" + client_name: "" + client_version: "" + http: + method: 4 + request_headers: + accept: + value: + - "" + content-length: + value: + - "" + content-type: + value: + - "" + host: + value: + - 
"" + user-agent: + value: + - "" + response_headers: + trace_id: + value: + - "[trace_id]" + status_code: 0 + cache_policy: ~ + query_plan: + node: + Condition: + condition: if + if_clause: + node: + Defer: + primary: + node: + node: + Fetch: + service_name: products + trace_parsing_failed: false + trace: + start_time: + seconds: "[seconds]" + nanos: "[nanos]" + end_time: + seconds: "[seconds]" + nanos: "[nanos]" + duration_ns: "[duration_ns]" + root: + original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: 0 + end_time: 0 + error: [] + child: + - original_field_name: "" + type: "[Product]" + parent_type: Query + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + - original_field_name: "" + type: String! + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: upc + id: + Index: 0 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + - original_field_name: "" + type: String! + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: upc + id: + Index: 1 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + - original_field_name: "" + type: String! 
+ parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: upc + id: + Index: 2 + id: + ResponseName: topProducts + id: ~ + is_incomplete: false + signature: "" + unexecuted_operation_body: "" + unexecuted_operation_name: "" + details: ~ + client_name: "" + client_version: "" + http: ~ + cache_policy: ~ + query_plan: ~ + full_query_cache_hit: false + persisted_query_hit: false + persisted_query_register: false + registered_operation: false + forbidden_operation: false + field_execution_weight: 1 + sent_time_offset: "[sent_time_offset]" + sent_time: + seconds: "[seconds]" + nanos: "[nanos]" + received_time: + seconds: "[seconds]" + nanos: "[nanos]" + deferred: + - depends: + - id: "0" + defer_label: "" + label: "" + path: + - id: + FieldName: topProducts + node: + node: + Sequence: + nodes: + - node: + Flatten: + response_path: + - id: + FieldName: topProducts + node: + node: + Fetch: + service_name: reviews + trace_parsing_failed: false + trace: + start_time: + seconds: "[seconds]" + nanos: "[nanos]" + end_time: + seconds: "[seconds]" + nanos: "[nanos]" + duration_ns: "[duration_ns]" + root: + original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: 0 + end_time: 0 + error: [] + child: + - original_field_name: "" + type: "[_Entity]!" + parent_type: Query + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "[Review]" + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: User + parent_type: Review + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: ID! + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: id + id: + ResponseName: author + id: + Index: 0 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: User + parent_type: Review + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: ID! 
+ parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: id + id: + ResponseName: author + id: + Index: 1 + id: + ResponseName: reviews + id: + Index: 0 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "[Review]" + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: User + parent_type: Review + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: ID! + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: id + id: + ResponseName: author + id: + Index: 0 + id: + ResponseName: reviews + id: + Index: 1 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "[Review]" + parent_type: Product + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: User + parent_type: Review + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: ID! + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: id + id: + ResponseName: author + id: + Index: 0 + id: + ResponseName: reviews + id: + Index: 2 + id: + ResponseName: _entities + id: ~ + is_incomplete: false + signature: "" + unexecuted_operation_body: "" + unexecuted_operation_name: "" + details: ~ + client_name: "" + client_version: "" + http: ~ + cache_policy: ~ + query_plan: ~ + full_query_cache_hit: false + persisted_query_hit: false + persisted_query_register: false + registered_operation: false + forbidden_operation: false + field_execution_weight: 1 + sent_time_offset: "[sent_time_offset]" + sent_time: + seconds: "[seconds]" + nanos: "[nanos]" + received_time: + seconds: "[seconds]" + nanos: "[nanos]" + - node: + Flatten: + response_path: + - id: + FieldName: topProducts + - id: + FieldName: reviews + - id: + FieldName: author + node: + node: + Fetch: + service_name: accounts + trace_parsing_failed: false + trace: + start_time: + seconds: "[seconds]" + nanos: "[nanos]" + end_time: + seconds: "[seconds]" + nanos: "[nanos]" + duration_ns: "[duration_ns]" + root: + original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: 0 + end_time: 0 + error: [] + child: + - original_field_name: "" + type: "[_Entity]!" 
+ parent_type: Query + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + id: + Index: 0 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + id: + Index: 1 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + id: + Index: 2 + - original_field_name: "" + type: "" + parent_type: "" + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: + - original_field_name: "" + type: String + parent_type: User + cache_policy: ~ + start_time: "[start_time]" + end_time: "[end_time]" + error: [] + child: [] + id: + ResponseName: name + id: + Index: 3 + id: + ResponseName: _entities + id: ~ + is_incomplete: false + signature: "" + unexecuted_operation_body: "" + unexecuted_operation_name: "" + details: ~ + client_name: "" + client_version: "" + http: ~ + cache_policy: ~ + query_plan: ~ + full_query_cache_hit: false + persisted_query_hit: false + persisted_query_register: false + registered_operation: false + forbidden_operation: false + field_execution_weight: 1 + sent_time_offset: "[sent_time_offset]" + sent_time: + seconds: "[seconds]" + nanos: "[nanos]" + received_time: + seconds: "[seconds]" + nanos: "[nanos]" + else_clause: ~ + full_query_cache_hit: false + persisted_query_hit: false + persisted_query_register: false + registered_operation: false + forbidden_operation: false + field_execution_weight: 1 + diff --git a/apollo-router/src/plugins/telemetry/tracing/testdata/condition_else_spandata.yaml b/apollo-router/src/plugins/telemetry/tracing/testdata/condition_else_spandata.yaml new file mode 100644 index 0000000000..158ec9527b --- /dev/null +++ b/apollo-router/src/plugins/telemetry/tracing/testdata/condition_else_spandata.yaml @@ -0,0 +1,1252 @@ +--- +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 14287822546131581520 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 17691584597033290046 + span_kind: Internal + name: supergraph + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 867241222 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 996401974 + attributes: + map: + code.filepath: + String: apollo-router/src/plugins/telemetry/mod.rs + code.lineno: + I64: 673 + graphql.operation.name: + String: "" + apollo_private.http.request_headers: + String: "{\"accept\":[\"\"],\"content-length\":[\"\"],\"content-type\":[\"\"],\"host\":[\"\"],\"user-agent\":[\"\"]}" + client.version: + String: "" + graphql.document: + String: "query($if: Boolean!) {\n topProducts {\n name\n ... 
@defer(if: $if) {\n reviews {\n author {\n name\n }\n }\n reviews {\n author {\n name\n }\n }\n }\n }\n}" + apollo_private.graphql.variables: + String: "{\"if\":\"false\"}" + thread.name: + String: tokio-runtime-worker + busy_ns: + I64: 8691539 + code.namespace: + String: "apollo_router::plugins::telemetry" + client.name: + String: "" + idle_ns: + I64: 120357048 + thread.id: + I64: 16 + apollo_private.field_level_instrumentation_ratio: + F64: 1.0 + apollo_private.operation_signature: + String: "# -\nquery($if:Boolean!){topProducts{name...@defer(if:$if){reviews{author{name}}reviews{author{name}}}}}" + evict_list: + - idle_ns + - busy_ns + - apollo_private.operation_signature + - apollo_private.http.request_headers + - apollo_private.graphql.variables + - apollo_private.field_level_instrumentation_ratio + - client.version + - client.name + - graphql.operation.name + - graphql.document + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 15237674702119070751 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 14287822546131581520 + span_kind: Internal + name: query_planning + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 867688559 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 867984269 + attributes: + map: + thread.name: + String: tokio-runtime-worker + code.namespace: + String: "apollo_router::services::supergraph_service" + busy_ns: + I64: 202332 + thread.id: + I64: 16 + code.filepath: + String: apollo-router/src/services/supergraph_service.rs + idle_ns: + I64: 89188 + code.lineno: + I64: 247 + graphql.document: + String: "query($if: Boolean!) {\n topProducts {\n name\n ... 
@defer(if: $if) {\n reviews {\n author {\n name\n }\n }\n reviews {\n author {\n name\n }\n }\n }\n }\n}" + graphql.operation.name: + String: "" + evict_list: + - idle_ns + - busy_ns + - graphql.operation.name + - graphql.document + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 13865784024556574003 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 14287822546131581520 + span_kind: Internal + name: execution + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 868258608 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 995684698 + attributes: + map: + code.namespace: + String: "apollo_router::plugins::telemetry" + thread.name: + String: tokio-runtime-worker + thread.id: + I64: 16 + idle_ns: + I64: 119214086 + busy_ns: + I64: 8228208 + code.filepath: + String: apollo-router/src/plugins/telemetry/mod.rs + code.lineno: + I64: 283 + evict_list: + - idle_ns + - busy_ns + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 6782401025275156233 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 13865784024556574003 + span_kind: Internal + name: condition + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 868378386 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 995541872 + attributes: + map: + thread.name: + String: tokio-runtime-worker + code.lineno: + I64: 327 + thread.id: + I64: 16 + code.namespace: + String: "apollo_router::query_planner::execution" + busy_ns: + I64: 7912453 + code.filepath: + String: apollo-router/src/query_planner/execution.rs + graphql.condition: + String: if + idle_ns: + I64: 119255502 + evict_list: + - idle_ns + - busy_ns + - graphql.condition + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 8824695332656836540 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 6782401025275156233 + span_kind: Internal + name: condition_else + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 868436285 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 995438855 + attributes: + map: + thread.name: + String: tokio-runtime-worker + code.lineno: + I64: 320 + code.namespace: + String: 
"apollo_router::query_planner::execution" + thread.id: + I64: 16 + code.filepath: + String: apollo-router/src/query_planner/execution.rs + idle_ns: + I64: 119291958 + busy_ns: + I64: 7712497 + evict_list: + - idle_ns + - busy_ns + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 16743763821444799861 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 8824695332656836540 + span_kind: Internal + name: sequence + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 868481682 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 995362238 + attributes: + map: + idle_ns: + I64: 119470686 + code.filepath: + String: apollo-router/src/query_planner/execution.rs + code.lineno: + I64: 133 + thread.name: + String: tokio-runtime-worker + busy_ns: + I64: 7408892 + code.namespace: + String: "apollo_router::query_planner::execution" + thread.id: + I64: 16 + evict_list: + - idle_ns + - busy_ns + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 17407667985756513925 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 16743763821444799861 + span_kind: Internal + name: fetch + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 868543283 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 917023578 + attributes: + map: + busy_ns: + I64: 1986511 + thread.id: + I64: 16 + apollo_private.sent_time_offset: + I64: 1459136 + idle_ns: + I64: 46482889 + code.namespace: + String: "apollo_router::query_planner::execution" + code.lineno: + I64: 188 + apollo.subgraph.name: + String: products + thread.name: + String: tokio-runtime-worker + code.filepath: + String: apollo-router/src/query_planner/execution.rs + evict_list: + - idle_ns + - busy_ns + - apollo_private.sent_time_offset + - apollo.subgraph.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 7936167443054682587 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 16743763821444799861 + span_kind: Internal + name: flatten + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 917215783 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 955428350 + attributes: + map: + code.filepath: + String: 
apollo-router/src/query_planner/execution.rs + code.lineno: + I64: 176 + code.namespace: + String: "apollo_router::query_planner::execution" + graphql.path: + String: /topProducts/@ + idle_ns: + I64: 35904853 + busy_ns: + I64: 2314489 + thread.name: + String: tokio-runtime-worker + thread.id: + I64: 14 + evict_list: + - idle_ns + - busy_ns + - graphql.path + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 16253144712595734830 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 16743763821444799861 + span_kind: Internal + name: flatten + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 955665393 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 995190497 + attributes: + map: + code.lineno: + I64: 176 + code.namespace: + String: "apollo_router::query_planner::execution" + thread.id: + I64: 16 + busy_ns: + I64: 2347943 + idle_ns: + I64: 37184285 + code.filepath: + String: apollo-router/src/query_planner/execution.rs + thread.name: + String: tokio-runtime-worker + graphql.path: + String: /topProducts/@/reviews/@/author + evict_list: + - idle_ns + - busy_ns + - graphql.path + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 15278772753518055685 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 16253144712595734830 + span_kind: Internal + name: fetch + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 955768549 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 995068484 + attributes: + map: + apollo_private.sent_time_offset: + I64: 88681888 + code.filepath: + String: apollo-router/src/query_planner/execution.rs + code.namespace: + String: "apollo_router::query_planner::execution" + idle_ns: + I64: 37140006 + busy_ns: + I64: 2141909 + code.lineno: + I64: 188 + thread.id: + I64: 16 + thread.name: + String: tokio-runtime-worker + apollo.subgraph.name: + String: accounts + evict_list: + - idle_ns + - busy_ns + - apollo_private.sent_time_offset + - apollo.subgraph.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 11170049527526553842 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 15278772753518055685 + span_kind: Internal + name: subgraph + 
start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 956111054 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 994769700 + attributes: + map: + busy_ns: + I64: 1390132 + thread.name: + String: tokio-runtime-worker + code.filepath: + String: apollo-router/src/plugins/telemetry/mod.rs + graphql.operation.name: + String: "" + code.namespace: + String: "apollo_router::plugins::telemetry" + graphql.document: + String: "query($representations:[_Any!]!){_entities(representations:$representations){...on User{name}}}" + apollo.subgraph.name: + String: accounts + apollo_private.ftv1: + String: GgwIw7WnnAYQgLGs0gMiDAjDtaecBhCAsazSA1isvR5ysQFirgEKCV9lbnRpdGllcxoKW19FbnRpdHldIUDn0BFI35cXYiAQAGIcCgRuYW1lGgZTdHJpbmdA8+4YSKOvGWoEVXNlcmIgEAFiHAoEbmFtZRoGU3RyaW5nQKuIGkiFsRpqBFVzZXJiIBACYhwKBG5hbWUaBlN0cmluZ0Dz3hpIofYaagRVc2VyYiAQA2IcCgRuYW1lGgZTdHJpbmdAxasbSJvDG2oEVXNlcmoFUXVlcnn5AQAAAAAAAPA/ + idle_ns: + I64: 37270820 + thread.id: + I64: 16 + code.lineno: + I64: 313 + evict_list: + - idle_ns + - busy_ns + - apollo_private.ftv1 + - graphql.operation.name + - graphql.document + - apollo.subgraph.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 12193731640433132388 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 11170049527526553842 + span_kind: Client + name: subgraph_request + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 956430440 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 993872582 + attributes: + map: + thread.id: + I64: 16 + code.filepath: + String: apollo-router/src/services/subgraph_service.rs + code.namespace: + String: "apollo_router::services::subgraph_service" + code.lineno: + I64: 173 + thread.name: + String: tokio-runtime-worker + net.peer.port: + String: "80" + net.transport: + String: ip_tcp + idle_ns: + I64: 37174926 + net.peer.name: + String: accounts.demo.starstuff.dev + http.route: + String: /graphql + apollo.subgraph.name: + String: accounts + busy_ns: + I64: 251431 + evict_list: + - idle_ns + - busy_ns + - apollo.subgraph.name + - net.transport + - http.route + - net.peer.port + - net.peer.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 17716600377366594972 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 7936167443054682587 + span_kind: Internal + name: fetch + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 917323270 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 955316673 + attributes: + map: + apollo.subgraph.name: + String: reviews + code.filepath: + String: apollo-router/src/query_planner/execution.rs + thread.name: + String: 
tokio-runtime-worker + code.namespace: + String: "apollo_router::query_planner::execution" + thread.id: + I64: 14 + busy_ns: + I64: 2122145 + idle_ns: + I64: 35855963 + apollo_private.sent_time_offset: + I64: 50235421 + code.lineno: + I64: 188 + evict_list: + - idle_ns + - busy_ns + - apollo_private.sent_time_offset + - apollo.subgraph.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 12581790234036960487 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 17716600377366594972 + span_kind: Internal + name: subgraph + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 917638675 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 955031718 + attributes: + map: + code.namespace: + String: "apollo_router::plugins::telemetry" + graphql.operation.name: + String: "" + thread.name: + String: tokio-runtime-worker + busy_ns: + I64: 1432596 + code.filepath: + String: apollo-router/src/plugins/telemetry/mod.rs + code.lineno: + I64: 313 + apollo.subgraph.name: + String: reviews + thread.id: + I64: 14 + graphql.document: + String: "query($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{author{__typename id}}}}}" + idle_ns: + I64: 35964358 + apollo_private.ftv1: + String: GgwIw7WnnAYQgIadwAMiDAjDtaecBhDAgeC/A1iptVRynQNimgMKCV9lbnRpdGllcxoKW19FbnRpdHldIUCGqxRI7OwbYqMBEABingEKB3Jldmlld3MaCFtSZXZpZXddQOzKHki+hzBiOxAAYjcKBmF1dGhvchoEVXNlckDSmjJIxe1GYhcKAmlkGgNJRCFA6dZISJHFSWoEVXNlcmoGUmV2aWV3YjsQAWI3CgZhdXRob3IaBFVzZXJAlcs3SNH5SWIXCgJpZBoDSUQhQO/YSkidoktqBFVzZXJqBlJldmlld2oHUHJvZHVjdGJlEAFiYQoHcmV2aWV3cxoIW1Jldmlld11AmJgkSLmVO2I7EABiNwoGYXV0aG9yGgRVc2VyQMu0PEjrxEtiFwoCaWQaA0lEIUDRoExIieBMagRVc2VyagZSZXZpZXdqB1Byb2R1Y3RiZRACYmEKB3Jldmlld3MaCFtSZXZpZXddQJSRKUiz6j9iOxAAYjcKBmF1dGhvchoEVXNlckC9j0FI/YFNYhcKAmlkGgNJRCFAg/1NSM+oTmoEVXNlcmoGUmV2aWV3agdQcm9kdWN0agVRdWVyefkBAAAAAAAA8D8= + evict_list: + - idle_ns + - busy_ns + - apollo_private.ftv1 + - graphql.operation.name + - graphql.document + - apollo.subgraph.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 13059514760148115210 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 12581790234036960487 + span_kind: Client + name: subgraph_request + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 917936271 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 954080612 + attributes: + map: + thread.id: + I64: 14 + net.transport: + String: ip_tcp + http.route: + String: /graphql + code.filepath: + String: apollo-router/src/services/subgraph_service.rs + thread.name: + String: tokio-runtime-worker + code.lineno: + I64: 173 + busy_ns: + I64: 
238580 + net.peer.port: + String: "80" + idle_ns: + I64: 35889487 + net.peer.name: + String: reviews.demo.starstuff.dev + apollo.subgraph.name: + String: reviews + code.namespace: + String: "apollo_router::services::subgraph_service" + evict_list: + - idle_ns + - busy_ns + - apollo.subgraph.name + - net.transport + - http.route + - net.peer.port + - net.peer.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 12520246751970884479 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 17407667985756513925 + span_kind: Internal + name: subgraph + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 868753996 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 916852605 + attributes: + map: + code.namespace: + String: "apollo_router::plugins::telemetry" + code.filepath: + String: apollo-router/src/plugins/telemetry/mod.rs + apollo.subgraph.name: + String: products + graphql.operation.name: + String: "" + graphql.document: + String: "{topProducts{__typename upc name}}" + idle_ns: + I64: 46604902 + apollo_private.ftv1: + String: GgwIw7WnnAYQwM3WrAMiDAjDtaecBhCAyZmsA1iQ8DJy/AFi+QEKC3RvcFByb2R1Y3RzGglbUHJvZHVjdF1AufwXSJb9ImJEEABiHwoDdXBjGgdTdHJpbmchQOHGJkjU7SdqB1Byb2R1Y3RiHwoEbmFtZRoGU3RyaW5nQNfIKEj3gSlqB1Byb2R1Y3RiRBABYh8KA3VwYxoHU3RyaW5nIUDOzCpInIEragdQcm9kdWN0Yh8KBG5hbWUaBlN0cmluZ0CGyitI6/MragdQcm9kdWN0YkQQAmIfCgN1cGMaB1N0cmluZyFAg5AtSIS9LWoHUHJvZHVjdGIfCgRuYW1lGgZTdHJpbmdAluMtSLqgLmoHUHJvZHVjdGoFUXVlcnn5AQAAAAAAAPA/ + busy_ns: + I64: 1496082 + code.lineno: + I64: 313 + thread.id: + I64: 16 + thread.name: + String: tokio-runtime-worker + evict_list: + - idle_ns + - busy_ns + - apollo_private.ftv1 + - graphql.operation.name + - graphql.document + - apollo.subgraph.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 3398738514204614748 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 12520246751970884479 + span_kind: Client + name: subgraph_request + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 869023655 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 915894445 + attributes: + map: + net.peer.name: + String: products.demo.starstuff.dev + code.namespace: + String: "apollo_router::services::subgraph_service" + idle_ns: + I64: 46547911 + http.route: + String: /graphql + net.peer.port: + String: "80" + net.transport: + String: ip_tcp + thread.id: + I64: 16 + busy_ns: + I64: 304371 + apollo.subgraph.name: + String: products + code.filepath: + String: apollo-router/src/services/subgraph_service.rs + code.lineno: + I64: 173 + thread.name: + String: tokio-runtime-worker + evict_list: + - idle_ns + - busy_ns + - 
apollo.subgraph.name + - net.transport + - http.route + - net.peer.port + - net.peer.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 122960190946202642409915189222685399094 + span_id: 17691584597033290046 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 0 + span_kind: Server + name: request + start_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 866508162 + end_time: + secs_since_epoch: 1669978819 + nanos_since_epoch: 996777793 + attributes: + map: + trace_id: + String: 5c8145789355263a300c728c5e71d836 + http.flavor: + String: HTTP/1.1 + http.method: + String: POST + code.namespace: + String: "apollo_router::axum_factory::utils" + code.filepath: + String: apollo-router/src/axum_factory/utils.rs + apollo_private.duration_ns: + I64: 130300920 + http.route: + String: / + code.lineno: + I64: 259 + busy_ns: + I64: 11197815 + thread.id: + I64: 16 + idle_ns: + I64: 119064623 + thread.name: + String: tokio-runtime-worker + evict_list: + - idle_ns + - busy_ns + - apollo_private.duration_ns + - trace_id + - http.flavor + - http.route + - http.method + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Ok + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 diff --git a/apollo-router/src/plugins/telemetry/tracing/testdata/condition_if_spandata.yaml b/apollo-router/src/plugins/telemetry/tracing/testdata/condition_if_spandata.yaml new file mode 100644 index 0000000000..a9ced9409c --- /dev/null +++ b/apollo-router/src/plugins/telemetry/tracing/testdata/condition_if_spandata.yaml @@ -0,0 +1,1441 @@ +--- +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 2226108203856432052 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 6681943477281517192 + span_kind: Internal + name: supergraph + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 694878120 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 831374780 + attributes: + map: + graphql.document: + String: "query($if: Boolean!) {\n topProducts {\n name\n ... 
@defer(if: $if) {\n reviews {\n author {\n name\n }\n }\n reviews {\n author {\n name\n }\n }\n }\n }\n}" + client.version: + String: "" + graphql.operation.name: + String: "" + apollo_private.http.request_headers: + String: "{\"accept\":[\"\"],\"content-length\":[\"\"],\"content-type\":[\"\"],\"host\":[\"\"],\"user-agent\":[\"\"]}" + apollo_private.field_level_instrumentation_ratio: + F64: 1.0 + busy_ns: + I64: 3579254 + code.filepath: + String: apollo-router/src/plugins/telemetry/mod.rs + apollo_private.operation_signature: + String: "# -\nquery($if:Boolean!){topProducts{name...@defer(if:$if){reviews{author{name}}reviews{author{name}}}}}" + apollo_private.graphql.variables: + String: "{\"if\":\"true\"}" + idle_ns: + I64: 132902529 + code.lineno: + I64: 673 + client.name: + String: "" + thread.name: + String: tokio-runtime-worker + thread.id: + I64: 15 + code.namespace: + String: "apollo_router::plugins::telemetry" + evict_list: + - idle_ns + - busy_ns + - apollo_private.operation_signature + - apollo_private.http.request_headers + - apollo_private.graphql.variables + - apollo_private.field_level_instrumentation_ratio + - client.version + - client.name + - graphql.operation.name + - graphql.document + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 7005602647906255377 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 2226108203856432052 + span_kind: Internal + name: query_planning + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 695299406 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 695547275 + attributes: + map: + thread.name: + String: tokio-runtime-worker + code.filepath: + String: apollo-router/src/services/supergraph_service.rs + code.lineno: + I64: 247 + graphql.operation.name: + String: "" + thread.id: + I64: 15 + graphql.document: + String: "query($if: Boolean!) {\n topProducts {\n name\n ... 
@defer(if: $if) {\n reviews {\n author {\n name\n }\n }\n reviews {\n author {\n name\n }\n }\n }\n }\n}" + busy_ns: + I64: 194440 + idle_ns: + I64: 70121 + code.namespace: + String: "apollo_router::services::supergraph_service" + evict_list: + - idle_ns + - busy_ns + - graphql.operation.name + - graphql.document + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 5496672787700345629 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 2226108203856432052 + span_kind: Internal + name: execution + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 695807715 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 830738870 + attributes: + map: + code.filepath: + String: apollo-router/src/plugins/telemetry/mod.rs + code.lineno: + I64: 283 + busy_ns: + I64: 2888938 + code.namespace: + String: "apollo_router::plugins::telemetry" + thread.id: + I64: 15 + thread.name: + String: tokio-runtime-worker + idle_ns: + I64: 45262821 + evict_list: + - idle_ns + - busy_ns + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 8052379820523592996 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 5496672787700345629 + span_kind: Internal + name: condition + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 695918484 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 830671333 + attributes: + map: + code.namespace: + String: "apollo_router::query_planner::execution" + idle_ns: + I64: 45284960 + code.lineno: + I64: 327 + thread.name: + String: tokio-runtime-worker + busy_ns: + I64: 2690658 + thread.id: + I64: 15 + code.filepath: + String: apollo-router/src/query_planner/execution.rs + graphql.condition: + String: if + evict_list: + - idle_ns + - busy_ns + - graphql.condition + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 14511206469911177014 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 8052379820523592996 + span_kind: Internal + name: condition_if + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 695977151 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 830608824 + attributes: + map: + code.lineno: + I64: 306 + thread.name: + String: 
tokio-runtime-worker + busy_ns: + I64: 2606848 + code.namespace: + String: "apollo_router::query_planner::execution" + thread.id: + I64: 15 + idle_ns: + I64: 45301861 + code.filepath: + String: apollo-router/src/query_planner/execution.rs + evict_list: + - idle_ns + - busy_ns + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 12847835672039219100 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 14511206469911177014 + span_kind: Internal + name: defer + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 696022339 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 830518170 + attributes: + map: + thread.name: + String: tokio-runtime-worker + idle_ns: + I64: 126457525 + busy_ns: + I64: 8029995 + code.filepath: + String: apollo-router/src/query_planner/execution.rs + code.lineno: + I64: 271 + thread.id: + I64: 15 + code.namespace: + String: "apollo_router::query_planner::execution" + evict_list: + - idle_ns + - busy_ns + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 4412163015592220412 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 12847835672039219100 + span_kind: Internal + name: defer_primary + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 696362399 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 743760914 + attributes: + map: + code.namespace: + String: "apollo_router::query_planner::execution" + code.lineno: + I64: 259 + thread.id: + I64: 15 + thread.name: + String: tokio-runtime-worker + code.filepath: + String: apollo-router/src/query_planner/execution.rs + idle_ns: + I64: 45358226 + busy_ns: + I64: 2046016 + evict_list: + - idle_ns + - busy_ns + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 1343706171599318485 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 12847835672039219100 + span_kind: Internal + name: defer_deferred + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 743955493 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 830061683 + attributes: + map: + graphql.depends: + String: "[{\"id\":\"0\",\"deferLabel\":null}]" + code.filepath: + String: apollo-router/src/query_planner/execution.rs + 
idle_ns: + I64: 79860304 + thread.name: + String: tokio-runtime-worker + graphql.path: + String: /topProducts/@ + code.lineno: + I64: 429 + code.namespace: + String: "apollo_router::query_planner::execution" + thread.id: + I64: 14 + busy_ns: + I64: 6254826 + evict_list: + - idle_ns + - busy_ns + - graphql.path + - graphql.depends + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 1615094331109834215 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 1343706171599318485 + span_kind: Internal + name: sequence + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 744057462 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 829959505 + attributes: + map: + code.filepath: + String: apollo-router/src/query_planner/execution.rs + code.lineno: + I64: 133 + thread.id: + I64: 14 + busy_ns: + I64: 5920423 + idle_ns: + I64: 79979594 + thread.name: + String: tokio-runtime-worker + code.namespace: + String: "apollo_router::query_planner::execution" + evict_list: + - idle_ns + - busy_ns + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 9487579781210749637 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 1615094331109834215 + span_kind: Internal + name: flatten + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 744129818 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 785840064 + attributes: + map: + idle_ns: + I64: 39187070 + code.namespace: + String: "apollo_router::query_planner::execution" + thread.name: + String: tokio-runtime-worker + code.filepath: + String: apollo-router/src/query_planner/execution.rs + code.lineno: + I64: 176 + thread.id: + I64: 14 + busy_ns: + I64: 2518567 + graphql.path: + String: /topProducts/@ + evict_list: + - idle_ns + - busy_ns + - graphql.path + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 4988545802786443624 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 1615094331109834215 + span_kind: Internal + name: flatten + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 786138149 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 829728538 + attributes: + map: + thread.name: + String: 
tokio-runtime-worker + idle_ns: + I64: 40783864 + graphql.path: + String: /topProducts/@/reviews/@/author + busy_ns: + I64: 2806594 + code.namespace: + String: "apollo_router::query_planner::execution" + code.lineno: + I64: 176 + code.filepath: + String: apollo-router/src/query_planner/execution.rs + thread.id: + I64: 10 + evict_list: + - idle_ns + - busy_ns + - graphql.path + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 4477074139770004131 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 4988545802786443624 + span_kind: Internal + name: fetch + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 786225452 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 829575654 + attributes: + map: + apollo_private.sent_time_offset: + I64: 91499517 + thread.id: + I64: 10 + code.namespace: + String: "apollo_router::query_planner::execution" + code.filepath: + String: apollo-router/src/query_planner/execution.rs + thread.name: + String: tokio-runtime-worker + code.lineno: + I64: 188 + apollo.subgraph.name: + String: accounts + busy_ns: + I64: 2573183 + idle_ns: + I64: 40767661 + evict_list: + - idle_ns + - busy_ns + - apollo_private.sent_time_offset + - apollo.subgraph.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 4307400006762176495 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 4477074139770004131 + span_kind: Internal + name: subgraph + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 786630674 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 829172108 + attributes: + map: + code.filepath: + String: apollo-router/src/plugins/telemetry/mod.rs + code.lineno: + I64: 313 + thread.id: + I64: 10 + graphql.document: + String: "query($representations:[_Any!]!){_entities(representations:$representations){...on User{name}}}" + busy_ns: + I64: 1669429 + thread.name: + String: tokio-runtime-worker + idle_ns: + I64: 40867534 + code.namespace: + String: "apollo_router::plugins::telemetry" + apollo_private.ftv1: + String: GgwI+LWnnAYQwMHbggMiDAj4taecBhCAvZ6CA1j2sDRysQFirgEKCV9lbnRpdGllcxoKW19FbnRpdHldIUD0/xtIsqwlYiAQAGIcCgRuYW1lGgZTdHJpbmdA5JEpSOaUKmoEVXNlcmIgEAFiHAoEbmFtZRoGU3RyaW5nQITYK0iQlSxqBFVzZXJiIBACYhwKBG5hbWUaBlN0cmluZ0Ckgy1IiLYtagRVc2VyYiAQA2IcCgRuYW1lGgZTdHJpbmdAlrUuSIiDL2oEVXNlcmoFUXVlcnn5AQAAAAAAAPA/ + graphql.operation.name: + String: "" + apollo.subgraph.name: + String: accounts + evict_list: + - idle_ns + - busy_ns + - apollo_private.ftv1 + - graphql.operation.name + - graphql.document + - apollo.subgraph.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 
128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 15682297860951097268 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 4307400006762176495 + span_kind: Client + name: subgraph_request + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 786922194 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 827988149 + attributes: + map: + code.namespace: + String: "apollo_router::services::subgraph_service" + net.peer.port: + String: "80" + apollo.subgraph.name: + String: accounts + code.filepath: + String: apollo-router/src/services/subgraph_service.rs + thread.name: + String: tokio-runtime-worker + http.route: + String: /graphql + idle_ns: + I64: 40798740 + code.lineno: + I64: 173 + net.peer.name: + String: accounts.demo.starstuff.dev + net.transport: + String: ip_tcp + thread.id: + I64: 10 + busy_ns: + I64: 244936 + evict_list: + - idle_ns + - busy_ns + - apollo.subgraph.name + - net.transport + - http.route + - net.peer.port + - net.peer.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 374273098096638287 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 9487579781210749637 + span_kind: Internal + name: fetch + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 744222848 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 785694374 + attributes: + map: + thread.id: + I64: 14 + apollo.subgraph.name: + String: reviews + apollo_private.sent_time_offset: + I64: 49483643 + code.filepath: + String: apollo-router/src/query_planner/execution.rs + idle_ns: + I64: 39184766 + code.namespace: + String: "apollo_router::query_planner::execution" + code.lineno: + I64: 188 + thread.name: + String: tokio-runtime-worker + busy_ns: + I64: 2283967 + evict_list: + - idle_ns + - busy_ns + - apollo_private.sent_time_offset + - apollo.subgraph.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 12633132268468348696 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 374273098096638287 + span_kind: Internal + name: subgraph + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 744492507 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 785324771 + attributes: + map: + code.filepath: + String: apollo-router/src/plugins/telemetry/mod.rs + 
apollo_private.ftv1: + String: GgwI+LWnnAYQgImV7wIiDAj4taecBhDA+93tAljn6poBcqgDYqUDCglfZW50aXRpZXMaCltfRW50aXR5XSFAqZQjSIKrL2KoARAAYqMBCgdyZXZpZXdzGghbUmV2aWV3XUC3/jNIzL9UYj0QAGI5CgZhdXRob3IaBFVzZXJApP1XSLPbfmIZCgJpZBoDSUQhQOflggFImIKEAWoEVXNlcmoGUmV2aWV3Yj4QAWI6CgZhdXRob3IaBFVzZXJA+eFgSNTHhAFiGQoCaWQaA0lEIUDyuYYBSNKmhwFqBFVzZXJqBlJldmlld2oHUHJvZHVjdGJoEAFiZAoHcmV2aWV3cxoIW1Jldmlld11A/IRCSIT0Z2I+EABiOgoGYXV0aG9yGgRVc2VyQJmBakjV0ocBYhkKAmlkGgNJRCFAp/eIAUiLsYkBagRVc2VyagZSZXZpZXdqB1Byb2R1Y3RiaBACYmQKB3Jldmlld3MaCFtSZXZpZXddQN6gSkjqnnJiPhAAYjoKBmF1dGhvchoEVXNlckDFsnVIt9WJAWIZCgJpZBoDSUQhQKvligFIj5yLAWoEVXNlcmoGUmV2aWV3agdQcm9kdWN0agVRdWVyefkBAAAAAAAA8D8= + thread.id: + I64: 14 + thread.name: + String: tokio-runtime-worker + apollo.subgraph.name: + String: reviews + code.lineno: + I64: 313 + graphql.operation.name: + String: "" + busy_ns: + I64: 1537429 + code.namespace: + String: "apollo_router::plugins::telemetry" + graphql.document: + String: "query($representations:[_Any!]!){_entities(representations:$representations){...on Product{reviews{author{__typename id}}}}}" + idle_ns: + I64: 39296791 + evict_list: + - idle_ns + - busy_ns + - apollo_private.ftv1 + - graphql.operation.name + - graphql.document + - apollo.subgraph.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 11564887256002176974 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 12633132268468348696 + span_kind: Client + name: subgraph_request + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 744791989 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 784339792 + attributes: + map: + apollo.subgraph.name: + String: reviews + idle_ns: + I64: 39220524 + code.lineno: + I64: 173 + http.route: + String: /graphql + thread.name: + String: tokio-runtime-worker + net.peer.name: + String: reviews.demo.starstuff.dev + code.namespace: + String: "apollo_router::services::subgraph_service" + code.filepath: + String: apollo-router/src/services/subgraph_service.rs + net.peer.port: + String: "80" + thread.id: + I64: 14 + net.transport: + String: ip_tcp + busy_ns: + I64: 303323 + evict_list: + - idle_ns + - busy_ns + - apollo.subgraph.name + - net.transport + - http.route + - net.peer.port + - net.peer.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 13334624373206909001 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 4412163015592220412 + span_kind: Internal + name: fetch + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 696431123 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 743652030 + attributes: + map: + code.namespace: + String: 
"apollo_router::query_planner::execution" + thread.name: + String: tokio-runtime-worker + code.lineno: + I64: 188 + thread.id: + I64: 15 + busy_ns: + I64: 1848154 + apollo.subgraph.name: + String: products + idle_ns: + I64: 45372963 + code.filepath: + String: apollo-router/src/query_planner/execution.rs + apollo_private.sent_time_offset: + I64: 1705118 + evict_list: + - idle_ns + - busy_ns + - apollo_private.sent_time_offset + - apollo.subgraph.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 357853883512773358 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 13334624373206909001 + span_kind: Internal + name: subgraph + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 696595810 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 743458778 + attributes: + map: + apollo_private.ftv1: + String: GgwI+LWnnAYQgMyR2wIiDAj4taecBhDAvtrZAli8irwBcokCYoYCCgt0b3BQcm9kdWN0cxoJW1Byb2R1Y3RdQJWWE0jj/KQBYkgQAGIhCgRuYW1lGgZTdHJpbmdAuqerAUiEsqwBagdQcm9kdWN0YiEKA3VwYxoHU3RyaW5nIUDtkq4BSIXgrgFqB1Byb2R1Y3RiSBABYiEKBG5hbWUaBlN0cmluZ0CKoLABSMjOsAFqB1Byb2R1Y3RiIQoDdXBjGgdTdHJpbmchQKSDsQFIjfyzAWoHUHJvZHVjdGJIEAJiIQoEbmFtZRoGU3RyaW5nQPGPtQFIsOO1AWoHUHJvZHVjdGIhCgN1cGMaB1N0cmluZyFA7pm2AUjwvLYBagdQcm9kdWN0agVRdWVyefkBAAAAAAAA8D8= + busy_ns: + I64: 1359192 + code.filepath: + String: apollo-router/src/plugins/telemetry/mod.rs + idle_ns: + I64: 45508525 + code.lineno: + I64: 313 + apollo.subgraph.name: + String: products + code.namespace: + String: "apollo_router::plugins::telemetry" + thread.id: + I64: 15 + thread.name: + String: tokio-runtime-worker + graphql.document: + String: "{topProducts{__typename name upc}}" + graphql.operation.name: + String: "" + evict_list: + - idle_ns + - busy_ns + - apollo_private.ftv1 + - graphql.operation.name + - graphql.document + - apollo.subgraph.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 18275235823533906477 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 357853883512773358 + span_kind: Client + name: subgraph_request + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 696871895 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 742639533 + attributes: + map: + net.peer.name: + String: products.demo.starstuff.dev + http.route: + String: /graphql + code.lineno: + I64: 173 + busy_ns: + I64: 304929 + thread.id: + I64: 15 + thread.name: + String: tokio-runtime-worker + apollo.subgraph.name: + String: products + code.namespace: + String: "apollo_router::services::subgraph_service" + net.peer.port: + String: "80" + code.filepath: + String: apollo-router/src/services/subgraph_service.rs + 
idle_ns: + I64: 45448321 + net.transport: + String: ip_tcp + evict_list: + - idle_ns + - busy_ns + - apollo.subgraph.name + - net.transport + - http.route + - net.peer.port + - net.peer.name + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Unset + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 +- span_context: + trace_id: 48179106241482103848812575548686436429 + span_id: 6681943477281517192 + trace_flags: 1 + is_remote: false + trace_state: ~ + parent_span_id: 0 + span_kind: Server + name: request + start_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 694077802 + end_time: + secs_since_epoch: 1669978872 + nanos_since_epoch: 831560350 + attributes: + map: + idle_ns: + I64: 131480409 + apollo_private.duration_ns: + I64: 50655519 + code.namespace: + String: "apollo_router::axum_factory::utils" + code.filepath: + String: apollo-router/src/axum_factory/utils.rs + thread.name: + String: tokio-runtime-worker + http.method: + String: POST + http.flavor: + String: HTTP/1.1 + busy_ns: + I64: 5802181 + trace_id: + String: 243ef555cc5e90c7f03a859b1c3f884d + thread.id: + I64: 15 + http.route: + String: / + code.lineno: + I64: 259 + evict_list: + - idle_ns + - busy_ns + - apollo_private.duration_ns + - trace_id + - http.flavor + - http.route + - http.method + - thread.name + - thread.id + - code.lineno + - code.namespace + - code.filepath + max_len: 128 + dropped_count: 0 + events: + queue: ~ + max_len: 128 + dropped_count: 0 + links: + queue: ~ + max_len: 128 + dropped_count: 0 + status_code: Ok + status_message: "" + resource: + attrs: + foo: + String: bar + process.executable.name: + String: router + service.name: + String: router + service.version: + String: 1.4.0 diff --git a/apollo-router/src/query_planner/execution.rs b/apollo-router/src/query_planner/execution.rs index 21dcc605bb..363115b02b 100644 --- a/apollo-router/src/query_planner/execution.rs +++ b/apollo-router/src/query_planner/execution.rs @@ -21,6 +21,12 @@ use crate::json_ext::Value; use crate::json_ext::ValueExt; use crate::query_planner::FlattenNode; use crate::query_planner::Primary; +use crate::query_planner::CONDITION_ELSE_SPAN_NAME; +use crate::query_planner::CONDITION_IF_SPAN_NAME; +use crate::query_planner::CONDITION_SPAN_NAME; +use crate::query_planner::DEFER_DEFERRED_SPAN_NAME; +use crate::query_planner::DEFER_PRIMARY_SPAN_NAME; +use crate::query_planner::DEFER_SPAN_NAME; use crate::query_planner::FETCH_SPAN_NAME; use crate::query_planner::FLATTEN_SPAN_NAME; use crate::query_planner::PARALLEL_SPAN_NAME; @@ -107,45 +113,54 @@ impl PlanNode { PlanNode::Sequence { nodes } => { value = parent_value.clone(); errors = Vec::new(); - let span = tracing::info_span!(SEQUENCE_SPAN_NAME); - for node in nodes { - let (v, subselect, err) = node - .execute_recursively(parameters, current_dir, &value, sender.clone()) - .instrument(span.clone()) - .in_current_span() - .await; - value.deep_merge(v); - errors.extend(err.into_iter()); - subselection = subselect; + async { + for node in nodes { + let (v, subselect, err) = node + .execute_recursively( + parameters, + current_dir, + &value, + sender.clone(), + ) + .in_current_span() + .await; + value.deep_merge(v); + errors.extend(err.into_iter()); + subselection = 
subselect; + } } + .instrument( + tracing::info_span!(SEQUENCE_SPAN_NAME, "otel.kind" = %SpanKind::Internal), + ) + .await } PlanNode::Parallel { nodes } => { value = Value::default(); errors = Vec::new(); + async { + let mut stream: stream::FuturesUnordered<_> = nodes + .iter() + .map(|plan| { + plan.execute_recursively( + parameters, + current_dir, + parent_value, + sender.clone(), + ) + .in_current_span() + }) + .collect(); - let span = tracing::info_span!(PARALLEL_SPAN_NAME); - let mut stream: stream::FuturesUnordered<_> = nodes - .iter() - .map(|plan| { - plan.execute_recursively( - parameters, - current_dir, - parent_value, - sender.clone(), - ) - .instrument(span.clone()) - }) - .collect(); - - while let Some((v, _subselect, err)) = stream - .next() - .instrument(span.clone()) - .in_current_span() - .await - { - value.deep_merge(v); - errors.extend(err.into_iter()); + while let Some((v, _subselect, err)) = stream.next().in_current_span().await + { + value.deep_merge(v); + errors.extend(err.into_iter()); + } } + .instrument( + tracing::info_span!(PARALLEL_SPAN_NAME, "otel.kind" = %SpanKind::Internal), + ) + .await } PlanNode::Flatten(FlattenNode { path, node }) => { // Note that the span must be `info` as we need to pick this up in apollo tracing @@ -158,9 +173,7 @@ impl PlanNode { parent_value, sender, ) - .instrument( - tracing::info_span!(FLATTEN_SPAN_NAME, apollo_private.path = %current_dir), - ) + .instrument(tracing::info_span!(FLATTEN_SPAN_NAME, "graphql.path" = %current_dir, "otel.kind" = %SpanKind::Internal)) .await; value = v; @@ -200,66 +213,63 @@ impl PlanNode { }, deferred, } => { - let mut deferred_fetches: HashMap)>> = - HashMap::new(); - let mut futures = Vec::new(); - - let (primary_sender, _) = tokio::sync::broadcast::channel::(1); + value = parent_value.clone(); + errors = Vec::new(); + async { + let mut deferred_fetches: HashMap)>> = + HashMap::new(); + let mut futures = Vec::new(); - for deferred_node in deferred { - let fut = deferred_node.execute( - parameters, - parent_value, - sender.clone(), - &primary_sender, - &mut deferred_fetches, - ); + let (primary_sender, _) = tokio::sync::broadcast::channel::(1); - futures.push(fut); - } + for deferred_node in deferred { + let fut = deferred_node + .execute( + parameters, + parent_value, + sender.clone(), + &primary_sender, + &mut deferred_fetches, + ) + .in_current_span(); - tokio::task::spawn( - async move { - join_all(futures).await; + futures.push(fut); } - .in_current_span(), - ); - - value = parent_value.clone(); - errors = Vec::new(); - let span = tracing::info_span!("primary"); - if let Some(node) = node { - let (v, _subselect, err) = node - .execute_recursively( - &ExecutionParameters { - context: parameters.context, - service_factory: parameters.service_factory, - schema: parameters.schema, - supergraph_request: parameters.supergraph_request, - deferred_fetches: &deferred_fetches, - options: parameters.options, - query: parameters.query, - }, - current_dir, - &value, - sender, - ) - .instrument(span.clone()) - .in_current_span() - .await; - let _guard = span.enter(); - value.deep_merge(v); - errors.extend(err.into_iter()); - subselection = primary_subselection.clone(); - let _ = primary_sender.send(value.clone()); - } else { - let _guard = span.enter(); + tokio::task::spawn(async move { + join_all(futures).await; + }); - subselection = primary_subselection.clone(); + if let Some(node) = node { + let (v, _subselect, err) = node + .execute_recursively( + &ExecutionParameters { + context: parameters.context, + 
service_factory: parameters.service_factory, + schema: parameters.schema, + supergraph_request: parameters.supergraph_request, + deferred_fetches: &deferred_fetches, + options: parameters.options, + query: parameters.query, + }, + current_dir, + &value, + sender, + ) + .instrument(tracing::info_span!(DEFER_PRIMARY_SPAN_NAME, "otel.kind" = %SpanKind::Internal)) + .await; + value.deep_merge(v); + errors.extend(err.into_iter()); + subselection = primary_subselection.clone(); - let _ = primary_sender.send(value.clone()); + let _ = primary_sender.send(value.clone()); + } else { + subselection = primary_subselection.clone(); + let _ = primary_sender.send(value.clone()); + } } + .instrument(tracing::info_span!(DEFER_SPAN_NAME, "otel.kind" = %SpanKind::Internal)) + .await } PlanNode::Condition { condition, @@ -269,23 +279,37 @@ impl PlanNode { value = Value::default(); errors = Vec::new(); - let v = parameters - .query - .variable_value( - parameters - .supergraph_request - .body() - .operation_name - .as_deref(), - condition.as_str(), - ¶meters.supergraph_request.body().variables, - ) - .unwrap_or(&Value::Bool(true)); // the defer if clause is mandatory, and defaults to true - - if let &Value::Bool(true) = v { - //FIXME: should we show an error if the if_node was not present? - if let Some(node) = if_clause { - let span = tracing::info_span!("condition_if"); + async { + let v = parameters + .query + .variable_value( + parameters + .supergraph_request + .body() + .operation_name + .as_deref(), + condition.as_str(), + ¶meters.supergraph_request.body().variables, + ) + .unwrap_or(&Value::Bool(true)); // the defer if clause is mandatory, and defaults to true + + if let &Value::Bool(true) = v { + //FIXME: should we show an error if the if_node was not present? 
+ if let Some(node) = if_clause { + let (v, subselect, err) = node + .execute_recursively( + parameters, + current_dir, + parent_value, + sender.clone(), + ) + .instrument(tracing::info_span!(CONDITION_IF_SPAN_NAME, "otel.kind" = %SpanKind::Internal)) + .await; + value.deep_merge(v); + errors.extend(err.into_iter()); + subselection = subselect; + } + } else if let Some(node) = else_clause { let (v, subselect, err) = node .execute_recursively( parameters, @@ -293,29 +317,19 @@ impl PlanNode { parent_value, sender.clone(), ) - .instrument(span.clone()) - .in_current_span() + .instrument(tracing::info_span!(CONDITION_ELSE_SPAN_NAME, "otel.kind" = %SpanKind::Internal)) .await; value.deep_merge(v); errors.extend(err.into_iter()); subselection = subselect; } - } else if let Some(node) = else_clause { - let span = tracing::info_span!("condition_else"); - let (v, subselect, err) = node - .execute_recursively( - parameters, - current_dir, - parent_value, - sender.clone(), - ) - .instrument(span.clone()) - .in_current_span() - .await; - value.deep_merge(v); - errors.extend(err.into_iter()); - subselection = subselect; } + .instrument(tracing::info_span!( + CONDITION_SPAN_NAME, + "graphql.condition" = condition, + "otel.kind" = %SpanKind::Internal + )) + .await } } @@ -374,7 +388,7 @@ impl DeferredNode { let query = parameters.query.clone(); let mut primary_receiver = primary_sender.subscribe(); let mut value = parent_value.clone(); - + let depends_json = serde_json::to_string(&self.depends).unwrap_or_default(); async move { let mut errors = Vec::new(); @@ -394,7 +408,6 @@ impl DeferredNode { } } - let span = tracing::info_span!("deferred"); let deferred_fetches = HashMap::new(); if let Some(node) = deferred_inner { @@ -413,8 +426,13 @@ impl DeferredNode { &value, tx.clone(), ) - .instrument(span.clone()) - .in_current_span() + .instrument(tracing::info_span!( + DEFER_DEFERRED_SPAN_NAME, + "graphql.label" = label, + "graphql.depends" = depends_json, + "graphql.path" = deferred_path.to_string(), + "otel.kind" = %SpanKind::Internal + )) .await; if !is_depends_empty { diff --git a/apollo-router/src/query_planner/mod.rs b/apollo-router/src/query_planner/mod.rs index 1163724e55..25a7d6a57e 100644 --- a/apollo-router/src/query_planner/mod.rs +++ b/apollo-router/src/query_planner/mod.rs @@ -20,6 +20,12 @@ pub(crate) const FETCH_SPAN_NAME: &str = "fetch"; pub(crate) const FLATTEN_SPAN_NAME: &str = "flatten"; pub(crate) const SEQUENCE_SPAN_NAME: &str = "sequence"; pub(crate) const PARALLEL_SPAN_NAME: &str = "parallel"; +pub(crate) const DEFER_SPAN_NAME: &str = "defer"; +pub(crate) const DEFER_PRIMARY_SPAN_NAME: &str = "defer_primary"; +pub(crate) const DEFER_DEFERRED_SPAN_NAME: &str = "defer_deferred"; +pub(crate) const CONDITION_SPAN_NAME: &str = "condition"; +pub(crate) const CONDITION_IF_SPAN_NAME: &str = "condition_if"; +pub(crate) const CONDITION_ELSE_SPAN_NAME: &str = "condition_else"; // The code resides in a separate submodule to allow writing a log filter activating it // separately from the query planner logs, as follows: diff --git a/apollo-router/src/query_planner/plan.rs b/apollo-router/src/query_planner/plan.rs index 873b6d4593..2635d081b9 100644 --- a/apollo-router/src/query_planner/plan.rs +++ b/apollo-router/src/query_planner/plan.rs @@ -132,17 +132,6 @@ impl PlanNode { } } - pub(crate) fn contains_condition_or_defer(&self) -> bool { - match self { - Self::Sequence { nodes } => nodes.iter().any(|n| n.contains_condition_or_defer()), - Self::Parallel { nodes } => nodes.iter().any(|n| 
n.contains_condition_or_defer()), - Self::Flatten(node) => node.node.contains_condition_or_defer(), - Self::Fetch(..) => false, - Self::Defer { .. } => true, - Self::Condition { .. } => true, - } - } - pub(crate) fn is_deferred( &self, operation: Option<&str>, diff --git a/apollo-router/tests/snapshots/integration_tests__traced_basic_composition.snap b/apollo-router/tests/snapshots/integration_tests__traced_basic_composition.snap index c528154c2d..0cb443e284 100644 --- a/apollo-router/tests/snapshots/integration_tests__traced_basic_composition.snap +++ b/apollo-router/tests/snapshots/integration_tests__traced_basic_composition.snap @@ -47,7 +47,7 @@ expression: get_spans() ], [ "apollo_private.graphql.variables", - "{\"reviewsForAuthorAuthorId\":[\"\"],\"topProductsFirst\":[\"\"]}" + "{\"reviewsForAuthorAuthorId\":\"\",\"topProductsFirst\":\"\"}" ], [ "apollo_private.http.request_headers", @@ -140,22 +140,10 @@ expression: get_spans() "name": "apollo_router::plugins::telemetry::execution", "record": { "entries": [ - [ - "graphql.document", - "{ topProducts { upc name reviews {id product { name } author { id name } } } }" - ], - [ - "graphql.operation.name", - "" - ], [ "otel.kind", "internal" ], - [ - "ftv1.do_not_sample_reason", - "" - ], [ "message", "dropping telemetry..." @@ -168,10 +156,7 @@ expression: get_spans() "module_path": "apollo_router::plugins::telemetry", "fields": { "names": [ - "graphql.document", - "graphql.operation.name", - "otel.kind", - "ftv1.do_not_sample_reason" + "otel.kind" ] } } @@ -180,14 +165,21 @@ expression: get_spans() "apollo_router::query_planner::execution::sequence": { "name": "apollo_router::query_planner::execution::sequence", "record": { - "entries": [], + "entries": [ + [ + "otel.kind", + "internal" + ] + ], "metadata": { "name": "sequence", "target": "apollo_router::query_planner::execution", "level": "INFO", "module_path": "apollo_router::query_planner::execution", "fields": { - "names": [] + "names": [ + "otel.kind" + ] } } }, @@ -399,8 +391,12 @@ expression: get_spans() "record": { "entries": [ [ - "apollo_private.path", + "graphql.path", "/topProducts/@" + ], + [ + "otel.kind", + "internal" ] ], "metadata": { @@ -410,7 +406,8 @@ expression: get_spans() "module_path": "apollo_router::query_planner::execution", "fields": { "names": [ - "apollo_private.path" + "graphql.path", + "otel.kind" ] } } @@ -623,14 +620,21 @@ expression: get_spans() "apollo_router::query_planner::execution::parallel": { "name": "apollo_router::query_planner::execution::parallel", "record": { - "entries": [], + "entries": [ + [ + "otel.kind", + "internal" + ] + ], "metadata": { "name": "parallel", "target": "apollo_router::query_planner::execution", "level": "INFO", "module_path": "apollo_router::query_planner::execution", "fields": { - "names": [] + "names": [ + "otel.kind" + ] } } }, @@ -640,8 +644,12 @@ expression: get_spans() "record": { "entries": [ [ - "apollo_private.path", + "graphql.path", "/topProducts/@/reviews/@/product" + ], + [ + "otel.kind", + "internal" ] ], "metadata": { @@ -651,7 +659,8 @@ expression: get_spans() "module_path": "apollo_router::query_planner::execution", "fields": { "names": [ - "apollo_private.path" + "graphql.path", + "otel.kind" ] } } @@ -866,8 +875,12 @@ expression: get_spans() "record": { "entries": [ [ - "apollo_private.path", + "graphql.path", "/topProducts/@/reviews/@/author" + ], + [ + "otel.kind", + "internal" ] ], "metadata": { @@ -877,7 +890,8 @@ expression: get_spans() "module_path": "apollo_router::query_planner::execution", 
"fields": { "names": [ - "apollo_private.path" + "graphql.path", + "otel.kind" ] } } diff --git a/apollo-router/tests/snapshots/integration_tests__traced_basic_request.snap b/apollo-router/tests/snapshots/integration_tests__traced_basic_request.snap index b375f05c41..bc33af8a03 100644 --- a/apollo-router/tests/snapshots/integration_tests__traced_basic_request.snap +++ b/apollo-router/tests/snapshots/integration_tests__traced_basic_request.snap @@ -47,7 +47,7 @@ expression: get_spans() ], [ "apollo_private.graphql.variables", - "{\"reviewsForAuthorAuthorId\":[\"\"],\"topProductsFirst\":[\"\"]}" + "{\"reviewsForAuthorAuthorId\":\"\",\"topProductsFirst\":\"\"}" ], [ "apollo_private.http.request_headers", @@ -140,22 +140,10 @@ expression: get_spans() "name": "apollo_router::plugins::telemetry::execution", "record": { "entries": [ - [ - "graphql.document", - "{ topProducts { name name2:name } }" - ], - [ - "graphql.operation.name", - "" - ], [ "otel.kind", "internal" ], - [ - "ftv1.do_not_sample_reason", - "" - ], [ "message", "dropping telemetry..." @@ -168,10 +156,7 @@ expression: get_spans() "module_path": "apollo_router::plugins::telemetry", "fields": { "names": [ - "graphql.document", - "graphql.operation.name", - "otel.kind", - "ftv1.do_not_sample_reason" + "otel.kind" ] } } diff --git a/xtask/src/commands/compliance.rs b/xtask/src/commands/compliance.rs index 4988c00df6..b93a94a901 100644 --- a/xtask/src/commands/compliance.rs +++ b/xtask/src/commands/compliance.rs @@ -12,7 +12,11 @@ use xtask::*; static LICENSES_HTML_PATH: &str = "licenses.html"; #[derive(Debug, StructOpt)] -pub struct Compliance {} +pub struct Compliance { + /// Do not run the compliance test + #[structopt(long)] + skip_compliance: bool, +} impl Compliance { pub fn run(&self) -> Result<()> { @@ -56,33 +60,37 @@ impl Compliance { } pub fn run_local(&self) -> Result<()> { - eprintln!("Checking generated licenses.html file..."); + if !self.skip_compliance { + eprintln!("Checking generated licenses.html file..."); - cargo!(["deny", "-L", "error", "check"]); + cargo!(["deny", "-L", "error", "check"]); - let licenses_html_before = Self::digest_for_license_file()?; + let licenses_html_before = Self::digest_for_license_file()?; - cargo!([ - "about", - "-L", - "error", - "generate", - "--workspace", - "-o", - "licenses.html", - "about.hbs", - ]); + cargo!([ + "about", + "-L", + "error", + "generate", + "--workspace", + "-o", + "licenses.html", + "about.hbs", + ]); - let licences_html_after = Self::digest_for_license_file()?; + let licences_html_after = Self::digest_for_license_file()?; - (licenses_html_before != licences_html_after).then(|| { - eprintln!( - "💅 licenses.html is now up to date. 💅\n\ + (licenses_html_before != licences_html_after).then(|| { + eprintln!( + "💅 licenses.html is now up to date. 💅\n\ Commit the changes and you should be good to go!" 
- ); - }); + ); + }); - Ok(()) + Ok(()) + } else { + Ok(()) + } } fn digest_for_license_file() -> Result> { From 20a1453ad966b6c6bc6ac8b6cacd143cc3ae843e Mon Sep 17 00:00:00 2001 From: Geoffroy Couprie Date: Fri, 2 Dec 2022 16:06:44 +0100 Subject: [PATCH 39/45] stabilize Redis caching for APQ and query plan (#2176) * check for Redis connection issues * use a null byte as separator for Redis key to prevent potential injections in the future * mark all of Redis caching as license key functionality under Elastic License v2 --- NEXT_CHANGELOG.md | 8 + .../axum_factory/axum_http_server_factory.rs | 1 + apollo-router/src/cache/mod.rs | 25 ++- apollo-router/src/cache/redis.rs | 144 +++++++++++++++ apollo-router/src/cache/storage.rs | 172 +++--------------- apollo-router/src/configuration/mod.rs | 1 + apollo-router/src/introspection.rs | 2 +- .../query_planner/caching_query_planner.rs | 10 +- apollo-router/src/router.rs | 9 +- apollo-router/src/services/layers/apq.rs | 16 +- apollo-router/src/test_harness.rs | 19 +- 11 files changed, 233 insertions(+), 174 deletions(-) create mode 100644 apollo-router/src/cache/redis.rs diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 19abfa8f58..9dc3229e79 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -283,6 +283,14 @@ to validate the data sent back to the client. Those query shapes were invalid fo By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2102 +### *Experimental* 🥼 APQ and query planner Redis caching fixes ([PR #2176](https://github.com/apollographql/router/pull/2176)) + +* use a null byte as separator in Redis keys +* handle Redis connection errors +* mark APQ and query plan caching as license key functionality + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2176 + ## 🛠 Maintenance ### Verify that deferred fragment acts as a boundary for nullability rules ([Issue #2169](https://github.com/apollographql/router/issues/2169)) diff --git a/apollo-router/src/axum_factory/axum_http_server_factory.rs b/apollo-router/src/axum_factory/axum_http_server_factory.rs index 5fcf3a3758..53107202f8 100644 --- a/apollo-router/src/axum_factory/axum_http_server_factory.rs +++ b/apollo-router/src/axum_factory/axum_http_server_factory.rs @@ -166,6 +166,7 @@ impl HttpServerFactory for AxumHttpServerFactory { let apq = APQLayer::with_cache( DeduplicatingCache::from_configuration( &configuration.supergraph.apq.experimental_cache, + "APQ", ) .await, ); diff --git a/apollo-router/src/cache/mod.rs b/apollo-router/src/cache/mod.rs index e98a664ed6..5ade794dc3 100644 --- a/apollo-router/src/cache/mod.rs +++ b/apollo-router/src/cache/mod.rs @@ -9,6 +9,8 @@ use self::storage::CacheStorage; use self::storage::KeyType; use self::storage::ValueType; +#[cfg(feature = "experimental_cache")] +mod redis; pub(crate) mod storage; type WaitMap = Arc>>>; @@ -26,24 +28,33 @@ where K: KeyType + 'static, V: ValueType + 'static, { + #[cfg(test)] pub(crate) async fn new() -> Self { - Self::with_capacity(DEFAULT_CACHE_CAPACITY, None).await + Self::with_capacity(DEFAULT_CACHE_CAPACITY, None, "test").await } - pub(crate) async fn with_capacity(capacity: usize, redis_urls: Option>) -> Self { + pub(crate) async fn with_capacity( + capacity: usize, + redis_urls: Option>, + caller: &str, + ) -> Self { Self { wait_map: Arc::new(Mutex::new(HashMap::new())), - storage: CacheStorage::new(capacity, redis_urls).await, + storage: CacheStorage::new(capacity, redis_urls, caller).await, } } - pub(crate) async fn 
from_configuration(config: &crate::configuration::Cache) -> Self { + pub(crate) async fn from_configuration( + config: &crate::configuration::Cache, + caller: &str, + ) -> Self { Self::with_capacity( config.in_memory.limit, #[cfg(feature = "experimental_cache")] config.redis.as_ref().map(|c| c.urls.clone()), #[cfg(not(feature = "experimental_cache"))] None, + caller, ) .await } @@ -198,7 +209,7 @@ mod tests { #[tokio::test] async fn example_cache_usage() { let k = "key".to_string(); - let cache = DeduplicatingCache::with_capacity(1, None).await; + let cache = DeduplicatingCache::with_capacity(1, None, "test").await; let entry = cache.get(&k).await; @@ -215,7 +226,7 @@ mod tests { #[test(tokio::test)] async fn it_should_enforce_cache_limits() { let cache: DeduplicatingCache = - DeduplicatingCache::with_capacity(13, None).await; + DeduplicatingCache::with_capacity(13, None, "test").await; for i in 0..14 { let entry = cache.get(&i).await; @@ -238,7 +249,7 @@ mod tests { mock.expect_retrieve().times(1).return_const(1usize); let cache: DeduplicatingCache = - DeduplicatingCache::with_capacity(10, None).await; + DeduplicatingCache::with_capacity(10, None, "test").await; // Let's trigger 100 concurrent gets of the same value and ensure only // one delegated retrieve is made diff --git a/apollo-router/src/cache/redis.rs b/apollo-router/src/cache/redis.rs new file mode 100644 index 0000000000..e5294ff008 --- /dev/null +++ b/apollo-router/src/cache/redis.rs @@ -0,0 +1,144 @@ +// This entire file is license key functionality + +use std::fmt; +use std::sync::Arc; + +use redis::AsyncCommands; +use redis::FromRedisValue; +use redis::RedisResult; +use redis::RedisWrite; +use redis::ToRedisArgs; +use redis_cluster_async::Client; +use redis_cluster_async::Connection; +use tokio::sync::Mutex; + +use super::KeyType; +use super::ValueType; + +#[derive(Clone, Debug, Eq, Hash, PartialEq)] +pub(crate) struct RedisKey(pub(crate) K) +where + K: KeyType; + +#[derive(Clone, Debug)] +pub(crate) struct RedisValue(pub(crate) V) +where + V: ValueType; + +#[derive(Clone)] +pub(crate) struct RedisCacheStorage { + inner: Arc>, +} + +fn get_type_of(_: &T) -> &'static str { + std::any::type_name::() +} + +impl fmt::Display for RedisKey +where + K: KeyType, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl ToRedisArgs for RedisKey +where + K: KeyType, +{ + fn write_redis_args(&self, out: &mut W) + where + W: ?Sized + RedisWrite, + { + out.write_arg_fmt(self); + } +} + +impl fmt::Display for RedisValue +where + V: ValueType, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}|{:?}", get_type_of(&self.0), self.0) + } +} + +impl ToRedisArgs for RedisValue +where + V: ValueType, +{ + fn write_redis_args(&self, out: &mut W) + where + W: ?Sized + RedisWrite, + { + let v = serde_json::to_vec(&self.0) + .expect("JSON serialization should not fail for redis values"); + out.write_arg(&v); + } +} + +impl FromRedisValue for RedisValue +where + V: ValueType, +{ + fn from_redis_value(v: &redis::Value) -> RedisResult { + match v { + redis::Value::Bulk(bulk_data) => { + for entry in bulk_data { + tracing::trace!("entry: {:?}", entry); + } + Err(redis::RedisError::from(( + redis::ErrorKind::TypeError, + "the data is the wrong type", + ))) + } + redis::Value::Data(v) => serde_json::from_slice(v).map(RedisValue).map_err(|e| { + redis::RedisError::from(( + redis::ErrorKind::TypeError, + "can't deserialize from JSON", + e.to_string(), + )) + }), + res => 
Err(redis::RedisError::from(( + redis::ErrorKind::TypeError, + "the data is the wrong type", + format!("{:?}", res), + ))), + } + } +} + +impl RedisCacheStorage { + pub(crate) async fn new(urls: Vec) -> Result { + let client = Client::open(urls)?; + let connection = client.get_connection().await?; + + tracing::trace!("redis connection established"); + Ok(Self { + inner: Arc::new(Mutex::new(connection)), + }) + } + + pub(crate) async fn get( + &self, + key: RedisKey, + ) -> Option> { + tracing::trace!("getting from redis: {:?}", key); + let mut guard = self.inner.lock().await; + guard.get(key).await.ok() + } + + pub(crate) async fn insert( + &self, + key: RedisKey, + value: RedisValue, + ) { + tracing::trace!("inserting into redis: {:?}, {:?}", key, value); + let mut guard = self.inner.lock().await; + let r = guard + .set::, RedisValue, redis::Value>(key, value) + .await; + tracing::trace!("insert result {:?}", r); + } +} diff --git a/apollo-router/src/cache/storage.rs b/apollo-router/src/cache/storage.rs index 320ccbd6c8..b719c1988b 100644 --- a/apollo-router/src/cache/storage.rs +++ b/apollo-router/src/cache/storage.rs @@ -1,3 +1,5 @@ +// This entire file is license key functionality + use std::fmt; use std::hash::Hash; use std::sync::Arc; @@ -7,6 +9,9 @@ use serde::de::DeserializeOwned; use serde::Serialize; use tokio::sync::Mutex; +#[cfg(feature = "experimental_cache")] +use super::redis::*; + pub(crate) trait KeyType: Clone + fmt::Debug + fmt::Display + Hash + Eq + Send + Sync { @@ -34,9 +39,6 @@ where // It has the functions it needs already } -#[cfg(feature = "experimental_cache")] -use redis_storage::*; - // placeholder storage module // // this will be replaced by the multi level (in memory + redis/memcached) once we find @@ -53,12 +55,26 @@ where K: KeyType, V: ValueType, { - pub(crate) async fn new(max_capacity: usize, _redis_urls: Option>) -> Self { + pub(crate) async fn new( + max_capacity: usize, + _redis_urls: Option>, + _caller: &str, + ) -> Self { Self { inner: Arc::new(Mutex::new(LruCache::new(max_capacity))), #[cfg(feature = "experimental_cache")] redis: if let Some(urls) = _redis_urls { - Some(RedisCacheStorage::new(urls).await) + match RedisCacheStorage::new(urls).await { + Err(e) => { + tracing::error!( + "could not open connection to Redis for {} caching: {:?}", + _caller, + e + ); + None + } + Ok(storage) => Some(storage), + } } else { None }, @@ -103,149 +119,3 @@ where self.inner.lock().await.len() } } - -#[cfg(feature = "experimental_cache")] -mod redis_storage { - use std::fmt; - use std::sync::Arc; - - use redis::AsyncCommands; - use redis::FromRedisValue; - use redis::RedisResult; - use redis::RedisWrite; - use redis::ToRedisArgs; - use redis_cluster_async::Client; - use redis_cluster_async::Connection; - use tokio::sync::Mutex; - - use super::KeyType; - use super::ValueType; - - #[derive(Clone, Debug, Eq, Hash, PartialEq)] - pub(crate) struct RedisKey(pub(crate) K) - where - K: KeyType; - - #[derive(Clone, Debug)] - pub(crate) struct RedisValue(pub(crate) V) - where - V: ValueType; - - #[derive(Clone)] - pub(crate) struct RedisCacheStorage { - inner: Arc>, - } - - fn get_type_of(_: &T) -> &'static str { - std::any::type_name::() - } - - impl fmt::Display for RedisKey - where - K: KeyType, - { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } - } - - impl ToRedisArgs for RedisKey - where - K: KeyType, - { - fn write_redis_args(&self, out: &mut W) - where - W: ?Sized + RedisWrite, - { - out.write_arg_fmt(self); - } - } - 
- impl fmt::Display for RedisValue - where - V: ValueType, - { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}|{:?}", get_type_of(&self.0), self.0) - } - } - - impl ToRedisArgs for RedisValue - where - V: ValueType, - { - fn write_redis_args(&self, out: &mut W) - where - W: ?Sized + RedisWrite, - { - let v = serde_json::to_vec(&self.0).unwrap(); - out.write_arg(&v); - } - } - - impl FromRedisValue for RedisValue - where - V: ValueType, - { - fn from_redis_value(v: &redis::Value) -> RedisResult { - match v { - redis::Value::Bulk(bulk_data) => { - for entry in bulk_data { - tracing::trace!("entry: {:?}", entry); - // entry.parse::().unwrap() - } - Err(redis::RedisError::from(( - redis::ErrorKind::TypeError, - "the data is the wrong type", - ))) - } - redis::Value::Data(v) => serde_json::from_slice(v).map(RedisValue).map_err(|e| { - redis::RedisError::from(( - redis::ErrorKind::TypeError, - "can't deserialize from JSON", - e.to_string(), - )) - }), - res => Err(redis::RedisError::from(( - redis::ErrorKind::TypeError, - "the data is the wrong type", - format!("{:?}", res), - ))), - } - } - } - - impl RedisCacheStorage { - pub(crate) async fn new(urls: Vec) -> Self { - let client = Client::open(urls).expect("opening ClusterClient"); - let connection = client.get_connection().await.expect("got redis connection"); - - tracing::trace!("redis connection established"); - Self { - inner: Arc::new(Mutex::new(connection)), - } - } - - pub(crate) async fn get( - &self, - key: RedisKey, - ) -> Option> { - tracing::trace!("getting from redis: {:?}", key); - let mut guard = self.inner.lock().await; - guard.get(key).await.ok() - } - - pub(crate) async fn insert( - &self, - key: RedisKey, - value: RedisValue, - ) { - tracing::trace!("inserting into redis: {:?}, {:?}", key, value); - let mut guard = self.inner.lock().await; - let r = guard - .set::, RedisValue, redis::Value>(key, value) - .await; - tracing::trace!("insert result {:?}", r); - } - } -} diff --git a/apollo-router/src/configuration/mod.rs b/apollo-router/src/configuration/mod.rs index 721004b4ee..a4be643455 100644 --- a/apollo-router/src/configuration/mod.rs +++ b/apollo-router/src/configuration/mod.rs @@ -374,6 +374,7 @@ impl Configuration { }, ); } + Ok(self) } } diff --git a/apollo-router/src/introspection.rs b/apollo-router/src/introspection.rs index 1dbbcc3770..90b00a03d0 100644 --- a/apollo-router/src/introspection.rs +++ b/apollo-router/src/introspection.rs @@ -21,7 +21,7 @@ pub(crate) struct Introspection { impl Introspection { pub(crate) async fn with_capacity(configuration: &Configuration, capacity: usize) -> Self { Self { - cache: CacheStorage::new(capacity, None).await, + cache: CacheStorage::new(capacity, None, "introspection").await, defer_support: configuration.supergraph.preview_defer_support, } } diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs index da2ed9c789..7f4d2861be 100644 --- a/apollo-router/src/query_planner/caching_query_planner.rs +++ b/apollo-router/src/query_planner/caching_query_planner.rs @@ -1,3 +1,5 @@ +// This entire file is license key functionality + use std::collections::HashMap; use std::ops::Deref; use std::sync::Arc; @@ -38,8 +40,10 @@ where schema_id: Option, config: &crate::configuration::QueryPlanning, ) -> CachingQueryPlanner { - let cache = - Arc::new(DeduplicatingCache::from_configuration(&config.experimental_cache).await); + let cache = Arc::new( + 
DeduplicatingCache::from_configuration(&config.experimental_cache, "query planner") + .await, + ); Self { cache, delegate, @@ -190,7 +194,7 @@ impl std::fmt::Display for CachingQueryKey { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, - "plan|{}|{}|{}", + "plan\0{}\0{}\0{}", self.schema_id.as_deref().unwrap_or("-"), self.query, self.operation.as_deref().unwrap_or("-") diff --git a/apollo-router/src/router.rs b/apollo-router/src/router.rs index 41352ca776..7e0d741ac0 100644 --- a/apollo-router/src/router.rs +++ b/apollo-router/src/router.rs @@ -36,6 +36,7 @@ use self::Event::UpdateSchema; use crate::axum_factory::make_axum_router; use crate::axum_factory::AxumHttpServerFactory; use crate::axum_factory::ListenAddrAndRouter; +use crate::cache::DeduplicatingCache; use crate::configuration::Configuration; use crate::configuration::ListenAddr; use crate::plugin::DynPlugin; @@ -64,7 +65,13 @@ async fn make_transport_service( .create(configuration.clone(), schema, None, Some(extra_plugins)) .await?; - let apq = APQLayer::new().await; + let apq = APQLayer::with_cache( + DeduplicatingCache::from_configuration( + &configuration.supergraph.apq.experimental_cache, + "APQ", + ) + .await, + ); let web_endpoints = service_factory.web_endpoints(); let routers = make_axum_router(service_factory, &configuration, web_endpoints, apq)?; // FIXME: how should diff --git a/apollo-router/src/services/layers/apq.rs b/apollo-router/src/services/layers/apq.rs index 6f8f309876..18d82f5166 100644 --- a/apollo-router/src/services/layers/apq.rs +++ b/apollo-router/src/services/layers/apq.rs @@ -3,6 +3,8 @@ //! For more information on APQ see: //! +// This entire file is license key functionality + use std::ops::ControlFlow; use futures::future::BoxFuture; @@ -38,12 +40,6 @@ pub(crate) struct APQLayer { } impl APQLayer { - pub(crate) async fn new() -> Self { - Self { - cache: DeduplicatingCache::new().await, - } - } - pub(crate) fn with_cache(cache: DeduplicatingCache) -> Self { Self { cache } } @@ -123,14 +119,14 @@ pub(crate) async fn apq_request( if query_matches_hash(query.as_str(), query_hash_bytes.as_slice()) { tracing::trace!("apq: cache insert"); let _ = request.context.insert("persisted_query_hit", false); - cache.insert(format!("apq|{query_hash}"), query).await; + cache.insert(redis_key(&query_hash), query).await; } else { tracing::warn!("apq: graphql request doesn't match provided sha256Hash"); } Ok(request) } (Some((apq_hash, _)), _) => { - if let Ok(cached_query) = cache.get(&format!("apq|{apq_hash}")).await.get().await { + if let Ok(cached_query) = cache.get(&redis_key(&apq_hash)).await.get().await { let _ = request.context.insert("persisted_query_hit", true); tracing::trace!("apq: cache hit"); request.supergraph_request.body_mut().query = Some(cached_query); @@ -171,6 +167,10 @@ fn query_matches_hash(query: &str, hash: &[u8]) -> bool { hash == digest.finalize().as_slice() } +fn redis_key(query_hash: &str) -> String { + format!("apq\0{query_hash}") +} + #[cfg(test)] mod apq_tests { use std::borrow::Cow; diff --git a/apollo-router/src/test_harness.rs b/apollo-router/src/test_harness.rs index 93de732c4b..f22450d91b 100644 --- a/apollo-router/src/test_harness.rs +++ b/apollo-router/src/test_harness.rs @@ -5,6 +5,7 @@ use tower::BoxError; use tower::Layer; use tower::ServiceExt; +use crate::cache::DeduplicatingCache; use crate::configuration::Configuration; use crate::plugin::test::canned; use crate::plugin::test::MockSubgraph; @@ -210,8 +211,14 @@ impl<'a> TestHarness<'a> { /// 
Builds the GraphQL service pub async fn build(self) -> Result { - let (_config, router_creator) = self.build_common().await?; - let apq = APQLayer::new().await; + let (configuration, router_creator) = self.build_common().await?; + let apq = APQLayer::with_cache( + DeduplicatingCache::from_configuration( + &configuration.supergraph.apq.experimental_cache, + "APQ", + ) + .await, + ); Ok(tower::service_fn(move |request| { // APQ must be added here because it is implemented in the HTTP server @@ -230,7 +237,13 @@ impl<'a> TestHarness<'a> { let (config, router_creator) = self.build_common().await?; let web_endpoints = router_creator.web_endpoints(); - let apq = APQLayer::new().await; + let apq = APQLayer::with_cache( + DeduplicatingCache::from_configuration( + &config.supergraph.apq.experimental_cache, + "APQ", + ) + .await, + ); let routers = make_axum_router(router_creator, &config, web_endpoints, apq)?; let ListenAddrAndRouter(_listener, router) = routers.main; From 665a5bdb4e3844454a7dc11bc40f32a0ebbd979a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Fri, 2 Dec 2022 15:18:37 +0000 Subject: [PATCH 40/45] fix(deps): update rust crate tower-http to 0.3.5 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 204cce33db..b4c65ec68d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5341,9 +5341,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" +checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" dependencies = [ "async-compression", "bitflags", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 4b4fd1d7de..d0e5350cf8 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -171,7 +171,7 @@ tokio-stream = { version = "0.1.11", features = ["sync", "net"] } tokio-util = { version = "0.7.4", features = ["net", "codec"] } tonic = { version = "0.6.2", features = ["transport", "tls", "tls-roots"] } tower = { version = "0.4.13", features = ["full"] } -tower-http = { version = "0.3.4", features = [ +tower-http = { version = "0.3.5", features = [ "trace", "cors", "compression-br", From 3dc757e51652fb9e88a2878e0e1ec75f91248c1b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 3 Dec 2022 06:12:31 +0000 Subject: [PATCH 41/45] fix(deps): update rust crate libc to 0.2.138 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b4c65ec68d..7ad15933b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2563,9 +2563,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.137" +version = "0.2.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" +checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8" [[package]] name = "libfuzzer-sys" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index d0e5350cf8..b8066f7c9c 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -88,7 +88,7 @@ itertools = "0.10.5" jsonpath_lib = "0.3.0" jsonschema = { version = "0.16.1", default-features = false } lazy_static = "1.4.0" -libc = "0.2.137" +libc = 
"0.2.138" lru = "0.7.8" mediatype = "0.19.11" mockall = "0.11.3" From a49234390d9008e250a1a7d346091b84eb093b7a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sat, 3 Dec 2022 14:42:57 +0000 Subject: [PATCH 42/45] chore(deps): update rust crate insta to 1.22.0 --- Cargo.lock | 4 ++-- apollo-router/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7ad15933b4..bb32d30d6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2308,9 +2308,9 @@ dependencies = [ [[package]] name = "insta" -version = "1.21.2" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "261bf85ed492cd1c47c9ba675e48649682a9d2d2e77f515c5386d7726fb0ba76" +checksum = "197f4e300af8b23664d4077bf5c40e0afa9ba66a567bb5a51d3def3c7b287d1c" dependencies = [ "console 0.15.2", "lazy_static", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index b8066f7c9c..c860183613 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -202,7 +202,7 @@ uname = "0.1.1" uname = "0.1.1" [dev-dependencies] -insta = { version = "1.21.2", features = ["json", "redactions", "yaml"] } +insta = { version = "1.22.0", features = ["json", "redactions", "yaml"] } introspector-gadget = "0.1.0" maplit = "1.0.2" memchr = { version = "2.5.0", default-features = false } From f65752c6f44d73db6456b58800e55febbc2039fe Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 5 Dec 2022 08:19:44 +0000 Subject: [PATCH 43/45] fix(deps): update rust crate serde to 1.0.149 --- Cargo.lock | 8 ++++---- apollo-router/Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bb32d30d6f..9952357783 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4433,9 +4433,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.148" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc" +checksum = "256b9932320c590e707b94576e3cc1f7c9024d0ee6612dfbcf1cb106cbe8e055" dependencies = [ "serde_derive", ] @@ -4452,9 +4452,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.148" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c" +checksum = "b4eae9b04cbffdfd550eb462ed33bc6a1b68c935127d008b27444d08380f94e4" dependencies = [ "proc-macro2", "quote", diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index c860183613..4aa791b8cf 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -158,7 +158,7 @@ rust-embed="6.4.2" schemars = { version = "0.8.11", features = ["url"] } shellexpand = "2.1.2" sha2 = "0.10.6" -serde = { version = "1.0.148", features = ["derive", "rc"] } +serde = { version = "1.0.149", features = ["derive", "rc"] } serde_json_bytes = { version = "0.2.0", features = ["preserve_order"] } serde_json = { version = "1.0.89", features = ["preserve_order"] } serde_urlencoded = "0.7.1" From aeb4bcf7a8b18a73e94fa0ef759eeb54b9642c49 Mon Sep 17 00:00:00 2001 From: Bryn Cooke Date: Mon, 5 Dec 2022 15:02:49 +0000 Subject: [PATCH 44/45] Partially automate release (#2202) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Automate the release: ```bash 
cargo xtask release prepare --help Finished dev [unoptimized + debuginfo] target(s) in 0.08s Running `xtask/target/debug/xtask release prepare --help` xtask-release-prepare 1.4.0 Prepare a new release USAGE: xtask release prepare [FLAGS] [version] FLAGS: --current-branch Release from the current branch rather than creating a new one --dry-run Dry run, don't commit the changes and create the PR -h, --help Prints help information --skip-license-ckeck Skip the license check -V, --version Prints version information ARGS: The new version that is being created OR to bump (major|minor|patch|current) bryn@amsterdam  ~/git/router   bryn/automate-release  ``` You can create a new minor release by running. ``` cargo xtask release prepare minor ``` To review please check: https://github.com/apollographql/router/pull/2206/commits/42dc28acc2258eb0043e7beabad6fe6ece29e054 Has everything it needs. As part of this xtask was removed from the root workspace. Co-authored-by: bryn Co-authored-by: Coenen Benjamin --- .cargo/config | 2 +- .circleci/config.yml | 59 + Cargo.lock | 95 -- Cargo.toml | 2 +- NEXT_CHANGELOG.md | 17 +- about.toml | 26 +- licenses.html | 295 ++++- xtask/Cargo.lock | 2318 +++++++++++++++++++++++++++++++++ xtask/Cargo.toml | 11 +- xtask/src/commands/mod.rs | 1 + xtask/src/commands/release.rs | 582 +++++++++ xtask/src/main.rs | 4 + 12 files changed, 3257 insertions(+), 155 deletions(-) create mode 100644 xtask/Cargo.lock create mode 100644 xtask/src/commands/release.rs diff --git a/.cargo/config b/.cargo/config index 856707a792..c00438ac33 100644 --- a/.cargo/config +++ b/.cargo/config @@ -1,2 +1,2 @@ [alias] -xtask = "run --locked --package xtask --" +xtask = "run --locked --package xtask --manifest-path xtask/Cargo.toml --" diff --git a/.circleci/config.yml b/.circleci/config.yml index 8a2871e648..42e906a9d2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -175,6 +175,11 @@ commands: command: | set -e -o pipefail cargo about --version || cargo install cargo-about + - run: + name: Install cargo edit if not present + command: | + set -e -o pipefail + cargo edit --version || cargo install cargo-edit - save_cache: key: rust-<< pipeline.parameters.cache_version >>-extra-tools-<< parameters.os >> paths: @@ -511,6 +516,23 @@ jobs: # Push chart to repository helm push ${CHART} oci://ghcr.io/apollographql/helm-charts + prepare_release: + parameters: + release_type: + type: string + docker: + - image: cimg/base:stable + resource_class: small + steps: + - checkout + - gh/setup + - linux_install_baseline + - install_extra_tools: + os: linux_amd + - run: + command: > + cargo xtask release prepare << parameters.release_type >> + workflows: ci_checks: jobs: @@ -539,6 +561,43 @@ workflows: parameters: platform: [macos_test, windows_test, amd_linux_test, arm_linux_test] + - prepare_major_release_approval: + type: approval + requires: + - test + filters: + branches: + only: + - dev + - prepare_minor_release_approval: + type: approval + requires: + - test + filters: + branches: + only: + - dev + - prepare_patch_release_approval: + type: approval + requires: + - test + filters: + branches: + only: + - dev + - prepare_release: + release_type: "major" + requires: + - prepare_major_release_approval + - prepare_release: + release_type: "minor" + requires: + - prepare_minor_release_approval + - prepare_release: + release_type: "patch" + requires: + - prepare_patch_release_approval + release: jobs: - build_release: diff --git a/Cargo.lock b/Cargo.lock index 9952357783..23f64378aa 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -699,24 +699,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" -[[package]] -name = "camino" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ad0e1e3e88dd237a156ab9f571021b8a158caa0ae44b1968a241efb5144c1e" -dependencies = [ - "serde", -] - -[[package]] -name = "cargo-platform" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" -dependencies = [ - "serde", -] - [[package]] name = "cargo-scaffold" version = "0.8.6" @@ -740,20 +722,6 @@ dependencies = [ "walkdir 2.3.2", ] -[[package]] -name = "cargo_metadata" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406c859255d568f4f742b3146d51851f3bfd49f734a2c289d9107c4395ee0062" -dependencies = [ - "camino", - "cargo-platform", - "semver 1.0.14", - "serde", - "serde_json", - "thiserror", -] - [[package]] name = "cast" version = "0.3.0" @@ -4421,9 +4389,6 @@ name = "semver" version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" -dependencies = [ - "serde", -] [[package]] name = "semver-parser" @@ -4496,12 +4461,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "serde_json_traversal" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8363f19aa1f3b2fc0bc9648fdde59c1074327a6f756fdd502fb1cfeb8179323c" - [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -4854,17 +4813,6 @@ dependencies = [ "libc", ] -[[package]] -name = "tar" -version = "0.4.38" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b55807c0344e1e6c04d7c965f5289c39a8d94ae23ed5c0b57aabac549f871c6" -dependencies = [ - "filetime", - "libc", - "xattr", -] - [[package]] name = "tempfile" version = "3.3.0" @@ -6040,38 +5988,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "xattr" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d1526bbe5aaeb5eb06885f4d987bcdfa5e23187055de9b83fe00156a821fabc" -dependencies = [ - "libc", -] - -[[package]] -name = "xtask" -version = "1.4.0" -dependencies = [ - "ansi_term", - "anyhow", - "base64 0.13.1", - "camino", - "cargo_metadata", - "flate2", - "libc", - "once_cell", - "reqwest", - "serde_json", - "serde_json_traversal", - "sha2", - "structopt", - "tar", - "tempfile", - "which", - "zip", -] - [[package]] name = "yaml-rust" version = "0.4.5" @@ -6086,14 +6002,3 @@ name = "zeroize" version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" - -[[package]] -name = "zip" -version = "0.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "537ce7411d25e54e8ae21a7ce0b15840e7bfcff15b51d697ec3266cc76bdf080" -dependencies = [ - "byteorder", - "crc32fast", - "crossbeam-utils", -] diff --git a/Cargo.toml b/Cargo.toml index af9d6fb0ad..f72820b5ee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,7 +22,7 @@ members = [ "examples/supergraph-sdl/rust", "examples/jwt-auth/rust", "fuzz", - "xtask", + # Note that xtask is not in the workspace member because it relies on dependencies that are incompatible with the router. 
Notably hyperx but there are others. ] # this makes build scripts and proc macros faster to compile diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 9dc3229e79..eba01bf318 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -250,7 +250,7 @@ By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographq If you're running the Router with dev mode with an empty config file, it will no longer panic -By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2165 +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2195 ### Fix missing apollo tracing variables ([Issue #2186](https://github.com/apollographql/router/issues/2186)) @@ -311,6 +311,21 @@ Updates to new apollo-rs APIs, and fixes some potential panics on unexpected use By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/2177 +### Semi-automate the release ([PR #2202](https://github.com/apollographql/router/pull/2202)) + +Developers can now run: +`cargo xtask release prepare minor` + +To raise a release PR. + +By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2202 + + +### Fix webpki license check ([PR #2202](https://github.com/apollographql/router/pull/2202)) + +There were issues with webpki license checks. This also meant that we were missing the Google Chromimum license which has als been fixed. +By [@o0Ignition0o](https://github.com/o0Ignition0o) [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2202 + ## 📚 Documentation ### Docs: Update cors match regex example ([Issue #2151](https://github.com/apollographql/router/issues/2151)) diff --git a/about.toml b/about.toml index b2418efc4c..c77910db9f 100644 --- a/about.toml +++ b/about.toml @@ -6,7 +6,7 @@ accepted = [ "CC0-1.0", "ISC", "LicenseRef-ELv2", - "LicenseRef-ring", + "LicenseRef-webpki", "MIT", "MPL-2.0", "Unicode-DFS-2016" @@ -17,6 +17,14 @@ private = { ignore = true } # Ignore dependencies used in tests only, test-log for example ignore-dev-dependencies = true +workarounds = [ + "ring", + "rustls", +] + +[ring] +accepted = ["OpenSSL"] + # apollographql licenses [xtask.clarify] license = "LicenseRef-ELv2" @@ -39,16 +47,18 @@ path = 'LICENSE' license = 'LicenseRef-ELv2' checksum = 'f527cb71b36ad7d828d0d1198ee0ab60db4170521a405661c0893f31b9962a6c' -[ring.clarify] -license = "LicenseRef-ring" -[[ring.clarify.files]] -path = "LICENSE" -license = "LicenseRef-ring" -checksum = '76b39f9b371688eac9d8323f96ee80b3aef5ecbc2217f25377bd4e4a615296a9' - [apollo-spaceport.clarify] license = "LicenseRef-ELv2" [[apollo-spaceport.clarify.files]] path = 'LICENSE' license = 'LicenseRef-ELv2' checksum = '6330b076d84694d0e8905c12d7a506e4ed8e5f4a7b0ddf41a3137483ff80be50' + +[webpki.clarify] +license = "ISC" +[[webpki.clarify.files]] +path = 'LICENSE' +checksum = "5b698ca13897be3afdb7174256fa1574f8c6892b8bea1a66dd6469d3fe27885a" +[[webpki.clarify.files]] +path = "third-party/chromium/LICENSE" +checksum = "845022e0c1db1abb41a6ba4cd3c4b674ec290f3359d9d3c78ae558d4c0ed9308" diff --git a/licenses.html b/licenses.html index 96d6c76db9..49cc81fd3d 100644 --- a/licenses.html +++ b/licenses.html @@ -44,13 +44,14 @@

    Third Party Licenses

    Overview of licenses:

    @@ -60,6 +61,7 @@

    All license text:

    Apache License 2.0

    Used by:

    @@ -1295,7 +1298,6 @@

    Used by:

  • opentelemetry-zipkin
  • os_str_bytes
  • ryu
  • - serde_json_traversal
  • structopt
  • structopt-derive
  • unicode-linebreak
  • @@ -2136,6 +2138,7 @@

    Apache License 2.0

    Used by:

    @@ -6073,7 +6076,6 @@

    Used by:

  • bstr
  • bumpalo
  • cache-padded
  • - camino
  • cc
  • cfg-if
  • concurrent-queue
  • @@ -6099,6 +6101,7 @@

    Used by:

  • fnv
  • form_urlencoded
  • fraction
  • + ghost
  • gimli
  • git2
  • group
  • @@ -6115,6 +6118,7 @@

    Used by:

  • idna
  • if_chain
  • indexmap
  • + inventory
  • itertools
  • itoa
  • jobserver
  • @@ -6156,6 +6160,7 @@

    Used by:

  • proc-macro-hack
  • proc-macro2
  • prost
  • + proteus
  • quote
  • regex
  • regex-syntax
  • @@ -6164,6 +6169,7 @@

    Used by:

  • rustc-hash
  • rustc_version
  • rustc_version
  • + rustls
  • rustls
  • rustls-native-certs
  • rustls-native-certs
  • @@ -6190,7 +6196,6 @@

    Used by:

  • smallvec
  • socket2
  • syn
  • - tar
  • tempfile
  • text-size
  • thiserror
  • @@ -6200,6 +6205,8 @@

    Used by:

  • toml
  • try_match
  • typed-builder
  • + typetag
  • + typetag-impl
  • ucd-trie
  • unicase
  • unicode-bidi
  • @@ -6218,7 +6225,6 @@

    Used by:

  • wasm-bindgen-macro-support
  • wasm-bindgen-shared
  • web-sys
  • - xattr
  • yaml-rust
                              Apache License
@@ -7879,7 +7885,6 @@ 

Apache License 2.0

Used by:

                              Apache License
@@ -9806,7 +9811,6 @@ 

Used by:

  • prost-derive
  • prost-types
  • rhai_codegen
  • - rustls
  • thrift
  • tinyvec_macros
  • try_match_inner
  • @@ -9904,7 +9908,6 @@

    Used by:

    Apache License 2.0

    Used by:

    +
  • +

    Apache License 2.0

    +

    Used by:

    + +
    MIT OR Apache-2.0
    +
  • BSD 2-Clause "Simplified" License

    Used by:

    @@ -10218,41 +10229,6 @@

    Used by:

    2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -
  • - -
  • -

    BSD 3-Clause "New" or "Revised" License

    -

    Used by:

    - -
    // Copyright 2015 The Chromium Authors. All rights reserved.
    -//
    -// Redistribution and use in source and binary forms, with or without
    -// modification, are permitted provided that the following conditions are
    -// met:
    -//
    -//    * Redistributions of source code must retain the above copyright
    -// notice, this list of conditions and the following disclaimer.
    -//    * Redistributions in binary form must reproduce the above
    -// copyright notice, this list of conditions and the following disclaimer
    -// in the documentation and/or other materials provided with the
    -// distribution.
    -//    * Neither the name of Google Inc. nor the names of its
    -// contributors may be used to endorse or promote products derived from
    -// this software without specific prior written permission.
    -//
    -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     
  • @@ -10568,6 +10544,83 @@

    Used by:

    d. Affirmer understands and acknowledges that Creative Commons is not a party to this document and has no duty or obligation with respect to this CC0 or use of the Work. + +
  • +
  • +

    ISC License

    +

    Used by:

    + +
       Copyright 2015-2016 Brian Smith.
    +
    +   Permission to use, copy, modify, and/or distribute this software for any
    +   purpose with or without fee is hereby granted, provided that the above
    +   copyright notice and this permission notice appear in all copies.
    +
    +   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
    +   WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    +   MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
    +   SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    +   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
    +   OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
    +   CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    +
  • +
  • +

    ISC License

    +

    Used by:

    + +
    /* Copyright (c) 2015, Google Inc.
    + *
    + * Permission to use, copy, modify, and/or distribute this software for any
    + * purpose with or without fee is hereby granted, provided that the above
    + * copyright notice and this permission notice appear in all copies.
    + *
    + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
    + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
    + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
    + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
    +
    +
  • +
  • +

    ISC License

    +

    Used by:

    + +
    // Copyright 2015 The Chromium Authors. All rights reserved.
    +//
    +// Redistribution and use in source and binary forms, with or without
    +// modification, are permitted provided that the following conditions are
    +// met:
    +//
    +//    * Redistributions of source code must retain the above copyright
    +// notice, this list of conditions and the following disclaimer.
    +//    * Redistributions in binary form must reproduce the above
    +// copyright notice, this list of conditions and the following disclaimer
    +// in the documentation and/or other materials provided with the
    +// distribution.
    +//    * Neither the name of Google Inc. nor the names of its
    +// contributors may be used to endorse or promote products derived from
    +// this software without specific prior written permission.
    +//
    +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
    +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
    +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
    +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
    +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
    +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
    +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
    +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     
  • @@ -11097,6 +11150,34 @@

    Used by:

    The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +
  • +
  • +

    MIT License

    +

    Used by:

    + +
    Copyright (c) 2015-2016 the fiat-crypto authors (see
    +https://github.com/mit-plv/fiat-crypto/blob/master/AUTHORS).
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +SOFTWARE.
     
  • @@ -12230,6 +12311,34 @@

    Used by:

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +
  • +
  • +

    MIT License

    +

    Used by:

    + +
    MIT License
    +
    +Copyright (c) [2019] [Changseok Han]
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +SOFTWARE.
  • MIT License

    @@ -12281,7 +12390,6 @@

    Used by:

    MIT License

    Used by:

    Permission is hereby granted, free of charge, to any
    @@ -12372,7 +12480,6 @@ 

    MIT License

    Used by:

    The MIT License (MIT)
     
    @@ -12794,6 +12901,38 @@ 

    Used by:

    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +
    +
  • +
  • +

    MIT License

    +

    Used by:

    + +
    The MIT License (MIT)
    +
    +Copyright (c) 2018 pyros2097
    +
    +Permission is hereby granted, free of charge, to any person obtaining a copy
    +of this software and associated documentation files (the "Software"), to deal
    +in the Software without restriction, including without limitation the rights
    +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    +copies of the Software, and to permit persons to whom the Software is
    +furnished to do so, subject to the following conditions:
    +
    +The above copyright notice and this permission notice shall be included in all
    +copies or substantial portions of the Software.
    +
    +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    +SOFTWARE.
    +
     
  • @@ -13348,6 +13487,66 @@

    Used by:

    ***** END LICENSE BLOCK ***** @(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $ +
  • +
  • +

    OpenSSL License

    +

    Used by:

    + +
    /* ====================================================================
    + * Copyright (c) 1998-2011 The OpenSSL Project.  All rights reserved.
    + *
    + * Redistribution and use in source and binary forms, with or without
    + * modification, are permitted provided that the following conditions
    + * are met:
    + *
    + * 1. Redistributions of source code must retain the above copyright
    + *    notice, this list of conditions and the following disclaimer. 
    + *
    + * 2. Redistributions in binary form must reproduce the above copyright
    + *    notice, this list of conditions and the following disclaimer in
    + *    the documentation and/or other materials provided with the
    + *    distribution.
    + *
    + * 3. All advertising materials mentioning features or use of this
    + *    software must display the following acknowledgment:
    + *    "This product includes software developed by the OpenSSL Project
    + *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
    + *
    + * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
    + *    endorse or promote products derived from this software without
    + *    prior written permission. For written permission, please contact
    + *    openssl-core@openssl.org.
    + *
    + * 5. Products derived from this software may not be called "OpenSSL"
    + *    nor may "OpenSSL" appear in their names without prior written
    + *    permission of the OpenSSL Project.
    + *
    + * 6. Redistributions of any form whatsoever must retain the following
    + *    acknowledgment:
    + *    "This product includes software developed by the OpenSSL Project
    + *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
    + *
    + * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
    + * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
    + * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
    + * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
    + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
    + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
    + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
    + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
    + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
    + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
    + * OF THE POSSIBILITY OF SUCH DAMAGE.
    + * ====================================================================
    + *
    + * This product includes cryptographic software written by Eric Young
    + * (eay@cryptsoft.com).  This product includes software written by Tim
    + * Hudson (tjh@cryptsoft.com).
    + *
    + */
  • Unicode License Agreement - Data Files and Software (2016)

    diff --git a/xtask/Cargo.lock b/xtask/Cargo.lock new file mode 100644 index 0000000000..d7cb5a23fb --- /dev/null +++ b/xtask/Cargo.lock @@ -0,0 +1,2318 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "aho-corasick" +version = "0.7.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" +dependencies = [ + "memchr", +] + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + +[[package]] +name = "anyhow" +version = "1.0.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" + +[[package]] +name = "async-recursion" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cda8f4bcc10624c4e85bc66b3f452cca98cfa5ca002dc83a16aad2367641bea" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "async-trait" +version = "0.1.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31e6e93155431f3931513b243d371981bb2770112b370c82745a1d19d2f99364" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "base64" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "block-buffer" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bumpalo" +version = "3.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" + +[[package]] +name = "byteorder" +version = "1.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" + +[[package]] +name = "bytes" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" + +[[package]] +name = "camino" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ad0e1e3e88dd237a156ab9f571021b8a158caa0ae44b1968a241efb5144c1e" +dependencies = [ + "serde", +] + 
+[[package]] +name = "cargo-platform" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo_metadata" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "982a0cf6a99c350d7246035613882e376d58cebe571785abc5da4f648d53ac0a" +dependencies = [ + "camino", + "cargo-platform", + "semver", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "cc" +version = "1.0.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +dependencies = [ + "iana-time-zone", + "js-sys", + "num-integer", + "num-traits", + "serde", + "time 0.1.45", + "wasm-bindgen", + "winapi", +] + +[[package]] +name = "clap" +version = "2.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +dependencies = [ + "bitflags", + "textwrap", + "unicode-width", +] + +[[package]] +name = "codespan-reporting" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" +dependencies = [ + "termcolor", + "unicode-width", +] + +[[package]] +name = "core-foundation" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" + +[[package]] +name = "cpufeatures" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" +dependencies = [ + "libc", +] + +[[package]] +name = "crc32fast" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crossbeam-channel" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" +dependencies = [ + "cfg-if", + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "crypto-common" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "cxx" +version = "1.0.82" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4a41a86530d0fe7f5d9ea779916b7cadd2d4f9add748b99c2c029cbbdfaf453" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06416d667ff3e3ad2df1cd8cd8afae5da26cf9cec4d0825040f88b5ca659a2f0" +dependencies = [ + "cc", + "codespan-reporting", + "once_cell", + "proc-macro2", + "quote", + "scratch", + "syn", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "820a9a2af1669deeef27cb271f476ffd196a2c4b6731336011e0ba63e2c7cf71" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.82" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a08a6e2fcc370a089ad3b4aaf54db3b1b4cee38ddabce5896b33eb693275f470" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "digest" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" +dependencies = [ + "block-buffer", + "crypto-common", +] + +[[package]] +name = "dyn-clone" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f94fa09c2aeea5b8839e414b7b841bf429fd25b9c522116ac97ee87856d88b2" + +[[package]] +name = "either" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" + +[[package]] +name = "encoding_rs" +version = "0.8.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "fastrand" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" +dependencies = [ + "instant", +] + +[[package]] +name = "filetime" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b9663d381d07ae25dc88dbdf27df458faa83a9b25336bcac83d5e452b5fc9d3" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "windows-sys 0.42.0", +] + +[[package]] +name = "flate2" +version = "1.0.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foreign-types" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" +dependencies = [ + "foreign-types-shared", +] + +[[package]] +name = "foreign-types-shared" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" + +[[package]] +name = "form_urlencoded" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +dependencies = [ + "matches", 
+ "percent-encoding", +] + +[[package]] +name = "futures" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" + +[[package]] +name = "futures-executor" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" + +[[package]] +name = "futures-macro" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" + +[[package]] +name = "futures-task" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" + +[[package]] +name = "futures-util" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", +] + +[[package]] +name = "h2" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "heck" +version = "0.3.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "http" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +dependencies = [ + "bytes", + "http", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "httpdate" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" + +[[package]] +name = "hyper" +version = "0.14.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper-rustls" +version = "0.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59df7c4e19c950e6e0e868dcc0a300b09a9b88e9ec55bd879ca819087a77355d" +dependencies = [ + "http", + "hyper", + "rustls", + "tokio", + "tokio-rustls", +] + +[[package]] +name = "hyper-tls" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +dependencies = [ + "bytes", + "hyper", + "native-tls", + "tokio", + "tokio-native-tls", +] + +[[package]] +name = "hyperx" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5617e92fc2f2501c3e2bc6ce547cad841adba2bae5b921c7e52510beca6d084c" +dependencies = [ + "base64", + "bytes", + "http", + "httpdate", + "language-tags", + "mime", + "percent-encoding", + "unicase", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "winapi", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +dependencies = [ + "cxx", + "cxx-build", +] + +[[package]] +name = "idna" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "418a0a6fab821475f634efe3ccc45c013f742efe03d853e8d3355d5cb850ecf8" +dependencies = [ + "matches", + "unicode-bidi", + "unicode-normalization", +] + 
+[[package]] +name = "indexmap" +version = "1.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" +dependencies = [ + "autocfg", + "hashbrown", +] + +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "ipnet" +version = "2.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f88c5561171189e69df9d98bcf18fd5f9558300f7ea7b801eb8a0fd748bd8745" + +[[package]] +name = "itertools" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" + +[[package]] +name = "js-sys" +version = "0.3.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "jsonwebtoken" +version = "8.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aa4b4af834c6cfd35d8763d359661b90f2e45d8f750a0849156c7f4671af09c" +dependencies = [ + "base64", + "pem", + "ring", + "serde", + "serde_json", + "simple_asn1", +] + +[[package]] +name = "language-tags" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4345964bb142484797b161f473a503a434de77149dd8c7427788c6e13379388" + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.137" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" + +[[package]] +name = "link-cplusplus" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9272ab7b96c9046fbc5bc56c06c117cb639fe2d509df0c421cad82d2915cf369" +dependencies = [ + "cc", +] + +[[package]] +name = "lock_api" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" +dependencies = [ + "cfg-if", + "serde", +] + +[[package]] +name = "matches" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" + +[[package]] +name = "memchr" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" + +[[package]] +name = "mime" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" + +[[package]] 
+name = "mime_guess" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +dependencies = [ + "mime", + "unicase", +] + +[[package]] +name = "miniz_oxide" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" +dependencies = [ + "libc", + "log", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.42.0", +] + +[[package]] +name = "native-tls" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +dependencies = [ + "lazy_static", + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "num-bigint" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +dependencies = [ + "autocfg", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "octorust" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc812eb2afad6af6def735816a8349e1daa0173395214d036330c2951741768c" +dependencies = [ + "anyhow", + "async-recursion", + "chrono", + "http", + "hyperx", + "jsonwebtoken", + "log", + "mime", + "pem", + "percent-encoding", + "reqwest", + "reqwest-conditional-middleware", + "reqwest-middleware", + "reqwest-retry", + "reqwest-tracing", + "ring", + "schemars", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "url", +] + +[[package]] +name = "once_cell" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" + +[[package]] +name = "openssl" +version = "0.10.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "020433887e44c27ff16365eaa2d380547a94544ad509aff6eb5b6e3e0b27b376" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.78" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07d5c8cb6e57b3a3612064d7b18b117912b4ce70955c2504d4b741c9e244b132" +dependencies = [ + "autocfg", + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "opentelemetry" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6105e89802af13fdf48c49d7646d3b533a70e536d818aae7e78ba0433d01acb8" +dependencies = [ + "async-trait", + "crossbeam-channel", + "futures-channel", + "futures-executor", + "futures-util", + "js-sys", + "lazy_static", + "percent-encoding", + "pin-project", + "rand", + "thiserror", +] + +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-sys 0.42.0", +] + +[[package]] +name = "pem" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" +dependencies = [ + "base64", +] + +[[package]] +name = "percent-encoding" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" + +[[package]] +name = "pin-project" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "redox_syscall" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex" +version = "1.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.6.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" + +[[package]] +name = "remove_dir_all" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" +dependencies = [ + "winapi", +] + +[[package]] +name = "reqwest" +version = "0.11.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" +dependencies = [ + "base64", + "bytes", + "encoding_rs", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "hyper", + "hyper-rustls", + "hyper-tls", + "ipnet", + "js-sys", + "log", + "mime", + "mime_guess", + "native-tls", + "once_cell", + "percent-encoding", + "pin-project-lite", + "rustls", + "rustls-pemfile", + "serde", + "serde_json", + "serde_urlencoded", + "tokio", + "tokio-native-tls", + "tokio-rustls", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "webpki-roots", + "winreg", +] + +[[package]] +name = "reqwest-conditional-middleware" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bce134f515eb4c2748bbd928086e7b0aae0d1568daf6c63b51e829aa6f2cf464" +dependencies = [ + "async-trait", + "reqwest", + "reqwest-middleware", + "task-local-extensions", +] + +[[package]] +name = "reqwest-middleware" +version = "0.1.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "69539cea4148dce683bec9dc95be3f0397a9bb2c248a49c8296a9d21659a8cdd" +dependencies = [ + "anyhow", + "async-trait", + "futures", + "http", + "reqwest", + "serde", + "task-local-extensions", + "thiserror", +] + +[[package]] +name = "reqwest-retry" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce246a729eaa6aff5e215aee42845bf5fed9893cc6cd51aeeb712f34e04dd9f3" +dependencies = [ + "anyhow", + "async-trait", + "chrono", + "futures", + "http", + "hyper", + "reqwest", + "reqwest-middleware", + "retry-policies", + "task-local-extensions", + "tokio", + "tracing", +] + +[[package]] +name = "reqwest-tracing" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64977f9a47fa7768cc88751e29026e569730ac1667c2eaeaac04b32624849fbe" +dependencies = [ + "async-trait", + "opentelemetry", + "reqwest", + "reqwest-middleware", + "task-local-extensions", + "tokio", + "tracing", + "tracing-opentelemetry", +] + +[[package]] +name = "retry-policies" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e09bbcb5003282bcb688f0bae741b278e9c7e8f378f561522c9806c58e075d9b" +dependencies = [ + "anyhow", + "chrono", + "rand", +] + +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin", + "untrusted", + "web-sys", + "winapi", +] + +[[package]] +name = "rustls" +version = "0.20.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" +dependencies = [ + "log", + "ring", + "sct", + "webpki", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" +dependencies = [ + "base64", +] + +[[package]] +name = "ryu" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "schannel" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" +dependencies = [ + "lazy_static", + "windows-sys 0.36.1", +] + +[[package]] +name = "schemars" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a5fb6c61f29e723026dc8e923d94c694313212abbecbbe5f55a7748eec5b307" +dependencies = [ + "bytes", + "chrono", + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", + "url", + "uuid", +] + +[[package]] +name = "schemars_derive" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f188d036977451159430f3b8dc82ec76364a42b7e289c2b18a9a18f4470058e9" +dependencies = [ + "proc-macro2", + "quote", + "serde_derive_internals", + "syn", +] + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "scratch" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8132065adcfd6e02db789d9285a0deb2f3fcb04002865ab67d5fb103533898" + +[[package]] +name = "sct" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "security-framework" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" +dependencies = [ + "serde", +] + +[[package]] +name = "serde" +version = "1.0.148" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.148" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_derive_internals" +version = "0.26.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" +dependencies = [ + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "serde_json_traversal" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8363f19aa1f3b2fc0bc9648fdde59c1074327a6f756fdd502fb1cfeb8179323c" + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha2" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sharded-slab" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e51e73328dc4ac0c7ccbda3a494dfa03df1de2f46018127f60c693f2648455b0" 
+dependencies = [ + "libc", +] + +[[package]] +name = "simple_asn1" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" +dependencies = [ + "num-bigint", + "num-traits", + "thiserror", + "time 0.3.17", +] + +[[package]] +name = "slab" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" + +[[package]] +name = "socket2" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "structopt" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10" +dependencies = [ + "clap", + "lazy_static", + "structopt-derive", +] + +[[package]] +name = "structopt-derive" +version = "0.4.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" +dependencies = [ + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "syn" +version = "1.0.105" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tar" +version = "0.4.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b55807c0344e1e6c04d7c965f5289c39a8d94ae23ed5c0b57aabac549f871c6" +dependencies = [ + "filetime", + "libc", + "xattr", +] + +[[package]] +name = "task-local-extensions" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4167afbec18ae012de40f8cf1b9bf48420abb390678c34821caa07d924941cc4" +dependencies = [ + "tokio", +] + +[[package]] +name = "tempfile" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +dependencies = [ + "cfg-if", + "fastrand", + "libc", + "redox_syscall", + "remove_dir_all", + "winapi", +] + +[[package]] +name = "termcolor" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "thiserror" +version = "1.0.37" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +dependencies = [ + "once_cell", +] + +[[package]] +name = "time" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" +dependencies = [ + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi", +] + +[[package]] +name = "time" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" +dependencies = [ + "itoa", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" + +[[package]] +name = "time-macros" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +dependencies = [ + "time-core", +] + +[[package]] +name = "tinyvec" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" + +[[package]] +name = "tokio" +version = "1.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d76ce4a75fb488c605c54bf610f221cea8b0dafb53333c1a67e8ee199dcd2ae3" +dependencies = [ + "autocfg", + "bytes", + "libc", + "memchr", + "mio", + "num_cpus", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "winapi", +] + +[[package]] +name = "tokio-macros" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.23.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +dependencies = [ + "rustls", + "tokio", + "webpki", +] + +[[package]] +name = "tokio-util" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + 
"pin-project-lite", + "tokio", + "tracing", +] + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +dependencies = [ + "cfg-if", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" +dependencies = [ + "lazy_static", + "log", + "tracing-core", +] + +[[package]] +name = "tracing-opentelemetry" +version = "0.17.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbbe89715c1dbbb790059e2565353978564924ee85017b5fff365c872ff6721f" +dependencies = [ + "once_cell", + "opentelemetry", + "tracing", + "tracing-core", + "tracing-log", + "tracing-subscriber", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" +dependencies = [ + "sharded-slab", + "thread_local", + "tracing-core", +] + +[[package]] +name = "try-lock" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" + +[[package]] +name = "typenum" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" + +[[package]] +name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + +[[package]] +name = "unicode-bidi" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" + +[[package]] +name = "unicode-ident" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" + +[[package]] +name = "unicode-normalization" +version = "0.1.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-segmentation" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fdbf052a0783de01e944a6ce7a8cb939e295b1e7be835a1112c3b9a7f047a5a" + +[[package]] +name = "unicode-width" +version = "0.1.10" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" + +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "url" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22fe195a4f217c25b25cb5058ced57059824a678474874038dc88d211bf508d3" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "uuid" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "walkdir" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +dependencies = [ + "same-file", + "winapi", + "winapi-util", +] + +[[package]] +name = "want" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +dependencies = [ + "log", + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasm-bindgen" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = 
"0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" + +[[package]] +name = "web-sys" +version = "0.3.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "webpki" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki-roots" +version = "0.22.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368bfe657969fb01238bb756d351dcade285e0f6fcbd36dcb23359a5169975be" +dependencies = [ + "webpki", +] + +[[package]] +name = "which" +version = "4.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" +dependencies = [ + "either", + "libc", + "once_cell", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +dependencies = [ + "windows_aarch64_msvc 0.36.1", + "windows_i686_gnu 0.36.1", + "windows_i686_msvc 0.36.1", + "windows_x86_64_gnu 0.36.1", + "windows_x86_64_msvc 0.36.1", +] + +[[package]] +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.0", + "windows_i686_gnu 0.42.0", + "windows_i686_msvc 0.42.0", + "windows_x86_64_gnu 0.42.0", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.36.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" + +[[package]] +name = "windows_i686_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" + +[[package]] +name = "windows_i686_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" + +[[package]] +name = "windows_i686_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" + +[[package]] +name = "windows_i686_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" + +[[package]] +name = "winreg" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80d0f4e272c85def139476380b12f9ac60926689dd2e01d4923222f40580869d" +dependencies = [ + "winapi", +] + +[[package]] +name = "xattr" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d1526bbe5aaeb5eb06885f4d987bcdfa5e23187055de9b83fe00156a821fabc" +dependencies = [ + "libc", +] + +[[package]] +name = "xtask" +version = "1.4.0" +dependencies = [ + "ansi_term", + "anyhow", + "base64", + "camino", + "cargo_metadata", + "chrono", + "flate2", + "itertools", + "libc", + "octorust", + "once_cell", + "regex", + "reqwest", + "serde_json", + "serde_json_traversal", + "sha2", + "structopt", + "tap", + "tar", + "tempfile", + "tokio", + "walkdir", + "which", + "zip", +] + +[[package]] +name = "zip" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "537ce7411d25e54e8ae21a7ce0b15840e7bfcff15b51d697ec3266cc76bdf080" +dependencies = [ + "byteorder", + "crc32fast", + "crossbeam-utils", +] diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index 3fc0863225..eb74353b2b 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -1,9 +1,11 @@ +[workspace] + [package] name = "xtask" version = "1.4.0" authors = 
["Apollo Graph, Inc. "] edition = "2021" -license = "Elastic-2.0" +license = "LicenseRef-ELv2" publish = false [dependencies] @@ -12,9 +14,13 @@ anyhow = "1" base64 = "0.13" camino = "1" cargo_metadata = "0.15" +chrono = "0.4.19" flate2 = "1" +itertools = "0.10.5" libc = "0.2" +octorust = "0.2.1" once_cell = "1" +regex="1.7.0" reqwest = { version = "0.11", default-features = false, features = [ "blocking", "native-tls", @@ -24,6 +30,9 @@ serde_json_traversal = "0.2" structopt = { version = "0.3", default-features = false } tar = "0.4" tempfile = "3" +tap = "1.0.1" +tokio = "1.17.0" which = "4" zip = { version = "0.6", default-features = false } sha2 = "0.10" +walkdir = "2.3.2" diff --git a/xtask/src/commands/mod.rs b/xtask/src/commands/mod.rs index 7a5c57f618..aff7cb71b0 100644 --- a/xtask/src/commands/mod.rs +++ b/xtask/src/commands/mod.rs @@ -3,6 +3,7 @@ pub(crate) mod compliance; pub(crate) mod dist; pub(crate) mod lint; pub(crate) mod package; +pub(crate) mod release; pub(crate) mod test; pub(crate) use all::All; diff --git a/xtask/src/commands/release.rs b/xtask/src/commands/release.rs new file mode 100644 index 0000000000..7e959c0117 --- /dev/null +++ b/xtask/src/commands/release.rs @@ -0,0 +1,582 @@ +use anyhow::{anyhow, Error, Result}; +use cargo_metadata::MetadataCommand; +use itertools::Itertools; +use octorust::types::{ + IssuesCreateMilestoneRequest, IssuesListMilestonesSort, IssuesListState, IssuesUpdateRequest, + Milestone, Order, PullsCreateRequest, State, TitleOneOf, +}; +use octorust::Client; +use std::str::FromStr; +use structopt::StructOpt; +use tap::TapFallible; +use walkdir::WalkDir; +use xtask::*; + +#[derive(Debug, StructOpt)] +pub enum Command { + /// Prepare a new release + Prepare(Prepare), +} + +impl Command { + pub fn run(&self) -> Result<()> { + match self { + Command::Prepare(command) => command.run(), + } + } +} + +#[derive(Debug, Clone, Eq, PartialEq)] +enum Version { + Major, + Minor, + Patch, + Current, + Version(String), +} + +type ParseError = &'static str; + +impl FromStr for Version { + type Err = ParseError; + fn from_str(version: &str) -> Result { + Ok(match version { + "major" => Version::Major, + "minor" => Version::Minor, + "patch" => Version::Patch, + "current" => Version::Current, + version => Version::Version(version.to_string()), + }) + } +} + +#[derive(Debug, StructOpt)] +pub struct Prepare { + /// Release from the current branch rather than creating a new one. + #[structopt(long)] + current_branch: bool, + + /// Skip the license check + #[structopt(long)] + skip_license_ckeck: bool, + + /// Dry run, don't commit the changes and create the PR. + #[structopt(long)] + dry_run: bool, + + /// The new version that is being created OR to bump (major|minor|patch|current). + version: Version, +} + +macro_rules! git { + ($( $i:expr ),*) => { + let git = which::which("git")?; + let result = std::process::Command::new(git).args([$( $i ),*]).status()?; + if !result.success() { + return Err(anyhow!("git {}", [$( $i ),*].join(","))); + } + }; +} + +macro_rules! 
replace_in_file { + ($path:expr, $regex:expr, $replacement:expr) => { + let before = std::fs::read_to_string($path)?; + let re = regex::Regex::new(&format!("(?m){}", $regex))?; + let after = re.replace_all(&before, $replacement); + std::fs::write($path, &after.as_ref())?; + }; +} + +impl Prepare { + pub fn run(&self) -> Result<()> { + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .unwrap() + .block_on(async { + let result = self.prepare_release().await; + if self.dry_run { + git!("reset", "--hard"); + } + result + }) + } + + async fn prepare_release(&self) -> Result<(), Error> { + let version = self.update_cargo_tomls(&self.version)?; + let github = octorust::Client::new( + "router-release".to_string(), + octorust::auth::Credentials::Token( + std::env::var("GITHUB_TOKEN").expect("GITHUB_TOKEN env variable must be set"), + ), + )?; + if !self.current_branch && !self.dry_run { + self.switch_to_release_branch(&version)?; + } + self.assign_issues_to_milestone(&github, &version).await?; + self.update_install_script(&version)?; + self.update_docs(&version)?; + self.update_helm_charts(&version)?; + self.docker_files(&version)?; + self.finalize_changelog(&version)?; + self.update_lock()?; + self.check_compliance()?; + if !self.dry_run { + self.create_release_pr(&github, &version).await?; + } + Ok(()) + } + + /// Create a new branch "#.#.#" where "#.#.#" is this release's version + /// (release) or "#.#.#-rc.#" (release candidate) + fn switch_to_release_branch(&self, version: &str) -> Result<()> { + println!("creating release branch"); + git!("fetch", "origin", &format!("dev:{}", version)); + git!("checkout", &version); + Ok(()) + } + + /// Go through NEXT_CHANGELOG.md find all issues and assign to the milestone. + /// Any PR that doesn't have an issue assign to the milestone. + async fn assign_issues_to_milestone(&self, github: &Client, version: &str) -> Result<()> { + println!("assigning issues and PRs to milestone v{}", version); + let change_log = std::fs::read_to_string("./NEXT_CHANGELOG.md")?; + + let re = + regex::Regex::new(r"(?ms)https://github.com/apollographql/router/(pull|issues)/(\d+)")?; + + let milestone = self.get_or_create_milestone(&github, version).await?; + let mut errors_encountered = false; + for (issues_or_pull, number) in re + .captures_iter(&change_log) + .map(|m| { + ( + m.get(1).expect("expected issues or pull").as_str(), + m.get(2).expect("expected issue or pull number").as_str(), + ) + }) + .sorted() + .dedup() + { + if let Err(e) = self + .handle_issue_or_pr(&github, &milestone, issues_or_pull, number) + .await + { + eprintln!("{}", e); + errors_encountered = true; + } + } + if errors_encountered { + return Err(anyhow!("errors encountered, aborting")); + } + Ok(()) + } + + async fn get_or_create_milestone(&self, github: &Client, version: &str) -> Result { + Ok( + match github + .issues() + .list_milestones( + "apollographql", + "router", + IssuesListState::Open, + IssuesListMilestonesSort::FallthroughString, + Order::FallthroughString, + 30, + 1, + ) + .await? + .into_iter() + .find(|m| m.title == format!("v{}", version)) + { + Some(milestone) => milestone, + None => { + println!("milestone not found, creating..."); + if !self.dry_run { + github + .issues() + .create_milestone( + "apollographql", + "router", + &IssuesCreateMilestoneRequest { + description: format!("Release v{}", version), + due_on: None, + state: Some(State::Open), + title: format!("v{}", version), + }, + ) + .await + .tap_err(|_| eprintln!("Failed to create milestone"))? 
+ } else { + Milestone { + closed_at: None, + closed_issues: 0, + created_at: None, + creator: None, + description: "".to_string(), + due_on: None, + html_url: "".to_string(), + id: 0, + labels_url: "".to_string(), + node_id: "".to_string(), + number: 0, + open_issues: 0, + state: Default::default(), + title: "".to_string(), + updated_at: None, + url: "".to_string(), + } + } + } + }, + ) + } + + async fn handle_issue_or_pr( + &self, + github: &Client, + milestone: &Milestone, + issues_or_pull: &str, + number: &str, + ) -> Result<()> { + match issues_or_pull { + "issues" => { + let issue = github + .issues() + .get("apollographql", "router", number.parse()?) + .await + .tap_err(|_| { + eprintln!( + "could not find issue {}, there is an error in NEXT_CHANGELOG.md", + number + ) + })?; + match issue.milestone { + None => { + println!("assigning milestone to https://github.com/apollographql/router/issues/{}", number); + self.update_milestone(github, &milestone, issue.number) + .await?; + } + Some(issue_milestone) if issue_milestone.id != milestone.id => { + return Err(anyhow!("issue https://github.com/apollographql/router/issues/{} was assigned to an existing milestone", number)); + } + _ => {} + } + if issue.assignees.is_empty() { + return Err(anyhow!( + "https://github.com/apollographql/router/issues/{} has no assignee", + number + )); + } + } + "pull" => { + let pull = github + .pulls() + .get("apollographql", "router", number.parse()?) + .await + .tap_err(|_| { + eprintln!( + "could not find PR {}, there is an error in NEXT_CHANGELOG.md", + number + ) + })?; + match pull.milestone { + None => { + println!( + "assigning milestone to https://github.com/apollographql/router/pull/{}", + number + ); + self.update_milestone(github, &milestone, pull.number) + .await?; + } + Some(pull_milestone) if pull_milestone.id != milestone.id => { + return Err(anyhow!("issue https://github.com/apollographql/router/pull/{} was assigned to an existing milestone", number)); + } + _ => {} + } + if pull.assignees.is_empty() { + return Err(anyhow!( + "https://github.com/apollographql/router/pull/{} has no assignee", + number + )); + } + if pull.state == State::Open { + return Err(anyhow!( + "https://github.com/apollographql/router/pull/{} is still open", + number + )); + } + } + _ => panic!("expected issues or pull"), + } + Ok(()) + } + + async fn update_milestone( + &self, + github: &Client, + milestone: &Milestone, + issue: i64, + ) -> Result<()> { + if !self.dry_run { + github + .issues() + .update( + "apollographql", + "router", + issue, + &IssuesUpdateRequest { + assignee: "".to_string(), + assignees: vec![], + body: "".to_string(), + labels: vec![], + milestone: Some(TitleOneOf::I64(milestone.number)), + state: None, + title: None, + }, + ) + .await?; + } + Ok(()) + } + + /// Update the `version` in `*/Cargo.toml` (do not forget the ones in scaffold templates). + /// Update the `apollo-router` version in the `dependencies` sections of the `Cargo.toml` files in `apollo-router-scaffold/templates/**`. 
+ fn update_cargo_tomls(&self, version: &Version) -> Result<String> { + println!("updating Cargo.toml files"); + match version { + Version::Current => {} + Version::Major => cargo!([ + "set-version", + "--bump", + "major", + "--package", + "apollo-router" + ]), + Version::Minor => cargo!([ + "set-version", + "--bump", + "minor", + "--package", + "apollo-router" + ]), + Version::Patch => cargo!([ + "set-version", + "--bump", + "patch", + "--package", + "apollo-router" + ]), + Version::Version(version) => { + cargo!(["set-version", version, "--package", "apollo-router"]) + } + } + + let metadata = MetadataCommand::new() + .manifest_path("./apollo-router/Cargo.toml") + .exec()?; + let version = metadata + .root_package() + .expect("root package missing") + .version + .to_string(); + let packages = vec!["apollo-router-scaffold", "apollo-router-benchmarks"]; + for package in packages { + cargo!(["set-version", &version, "--package", package]) + } + Ok(version) + } + + /// Update the `PACKAGE_VERSION` value in `scripts/install.sh` (it should be prefixed with `v`!) + fn update_install_script(&self, version: &str) -> Result<()> { + println!("updating install script"); + replace_in_file!( + "./scripts/install.sh", + "^PACKAGE_VERSION=.*$", + format!("PACKAGE_VERSION=\"v{}\"", version) + ); + Ok(()) + } + + /// Update `docker.mdx` and `kubernetes.mdx` with the release version. + /// Update the kubernetes section of the docs: + /// - go to the `helm/chart/router` folder + /// - run + /// ```helm template --set router.configuration.telemetry.metrics.prometheus.enabled=true --set managedFederation.apiKey="REDACTED" --set managedFederation.graphRef="REDACTED" --debug .``` + /// - Paste the output in the `Kubernetes Configuration` example of the `docs/source/containerization/kubernetes.mdx` file + fn update_docs(&self, version: &str) -> Result<()> { + println!("updating docs"); + replace_in_file!( + "./docs/source/containerization/docker.mdx", + "with your chosen version. e.g.: `v\\d+.\\d+.\\d+`", + format!("with your chosen version. e.g.: `v{}`", version) + ); + replace_in_file!( + "./docs/source/containerization/kubernetes.mdx", + "router/tree/v\\d+.\\d+.\\d+", + format!("router/tree/v{}", version) + ); + let helm_chart = String::from_utf8( + std::process::Command::new(which::which("helm")?) + .current_dir("./helm/chart/router") + .args([ + "template", + "--set", + "router.configuration.telemetry.metrics.prometheus.enabled=true", + "--set", + "managedFederation.apiKey=\"REDACTED\"", + "--set", + "managedFederation.graphRef=\"REDACTED\"", + "--debug", + ".", + ]) + .output()? + .stdout, + )?; + + replace_in_file!( + "./docs/source/containerization/kubernetes.mdx", + "^```yaml\n---\n# Source: router/templates/serviceaccount.yaml(.|\n)+?```", + format!("```yaml\n{}\n```", helm_chart.trim()) + ); + + Ok(()) + } + + /// Update `helm/chart/router/README.md` by running this from the repo root: `(cd helm/chart && helm-docs router)`. + /// (If not installed, you should [install `helm-docs`](https://github.com/norwoodj/helm-docs)) + fn update_helm_charts(&self, version: &str) -> Result<()> { + println!("updating helm charts"); + if !std::process::Command::new(which::which("helm-docs")?) + .current_dir("./helm/chart") + .args(["helm-docs", "router"]) + .status()? 
+ .success() + { + return Err(anyhow!("failed to generate helm docs")); + } + + replace_in_file!( + "./helm/chart/router/Chart.yaml", + "appVersion: \"v\\d+.\\d+.\\d+\"", + format!("appVersion: \"v{}\"", version) + ); + + Ok(()) + } + /// Update the `image` of the Docker image within `docker-compose*.yml` files inside the `dockerfiles` directory. + fn docker_files(&self, version: &str) -> Result<()> { + println!("updating docker files"); + for entry in WalkDir::new("./dockerfiles") { + let entry = entry?; + if entry + .file_name() + .to_string_lossy() + .starts_with("docker-compose.") + { + replace_in_file!( + entry.path(), + r"ghcr.io/apollographql/router:v\d+.\d+.\d+", + format!("ghcr.io/apollographql/router:v{}", version) + ); + } + } + Ok(()) + } + + /// Add a new section in `CHANGELOG.md` with the contents of `NEXT_CHANGELOG.md` + /// Put a Release date and the version number on the new `CHANGELOG.md` section + /// Update the version in `NEXT_CHANGELOG.md`. + /// Clear `NEXT_CHANGELOG.md` leaving only the template. + fn finalize_changelog(&self, version: &str) -> Result<()> { + println!("finalizing changelog"); + let next_changelog = std::fs::read_to_string("./NEXT_CHANGELOG.md")?; + let changelog = std::fs::read_to_string("./CHANGELOG.md")?; + let changes_regex = + regex::Regex::new(r"(?ms)(.*# \[x.x.x\] \(unreleased\) - ....-mm-dd\n)(.*)")?; + let captures = changes_regex + .captures(&next_changelog) + .expect("changelog format was unexpected"); + let template = captures + .get(1) + .expect("changelog format was unexpected") + .as_str(); + let changes = captures + .get(2) + .expect("changelog format was unexpected") + .as_str(); + + let update_regex = regex::Regex::new( + r"(?ms)This project adheres to \[Semantic Versioning v2.0.0\]\(https://semver.org/spec/v2.0.0.html\).\n", + )?; + let updated = update_regex.replace(&changelog, format!("This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html).\n\n# [{}] - {}\n{}\n", version, chrono::Utc::now().date_naive(), changes)); + std::fs::write("./CHANGELOG.md", updated.to_string())?; + std::fs::write("./NEXT_CHANGELOG.md", template.to_string())?; + Ok(()) + } + /// Update the license list with `cargo about generate --workspace -o licenses.html about.hbs`. + /// (If not installed, you can install `cargo-about` by running `cargo install cargo-about`.) + /// Run `cargo xtask check-compliance`. + fn check_compliance(&self) -> Result<()> { + println!("checking compliance"); + cargo!([ + "about", + "generate", + "--workspace", + "-o", + "licenses.html", + "about.hbs" + ]); + if !self.skip_license_ckeck { + cargo!(["xtask", "check-compliance"]); + } + Ok(()) + } + + /// Run `cargo check` so the lock file gets updated. 
+ fn update_lock(&self) -> Result<()> { + println!("updating lock file"); + cargo!(["check"]); + Ok(()) + } + + /// Create the release PR + async fn create_release_pr(&self, github: &Client, version: &str) -> Result<()> { + let git = which::which("git")?; + let result = std::process::Command::new(git) + .args(["branch", "--show-current"]) + .output()?; + if !result.status.success() { + return Err(anyhow!("failed to get git current branch")); + } + let current_branch = String::from_utf8(result.stdout)?; + + println!("creating release PR"); + git!("add", "-u"); + git!("commit", "-m", &format!("release {}", version)); + git!( + "push", + "--set-upstream", + "origin", + &format!("{}:{}", current_branch.trim(), version) + ); + github + .pulls() + .create( + "apollographql", + "router", + &PullsCreateRequest { + base: "main".to_string(), + body: format!("Release {}", version), + draft: None, + head: version.to_string(), + issue: 0, + maintainer_can_modify: None, + title: format!("Release {}", version), + }, + ) + .await + .tap_err(|_| eprintln!("failed to create release PR"))?; + Ok(()) + } +} diff --git a/xtask/src/main.rs b/xtask/src/main.rs index 5558733488..551d8b32c5 100644 --- a/xtask/src/main.rs +++ b/xtask/src/main.rs @@ -37,6 +37,9 @@ pub enum Command { /// Package build. Package(commands::Package), + + /// Prepare a release + Release(commands::release::Command), } impl Xtask { @@ -48,6 +51,7 @@ impl Xtask { Command::Lint(command) => command.run(), Command::Test(command) => command.run(), Command::Package(command) => command.run(), + Command::Release(command) => command.run(), }?; eprintln!("{}", Green.bold().paint("Success!")); Ok(()) From 5839fce771d4eb692e0dbf73a1cd064fba143df9 Mon Sep 17 00:00:00 2001 From: bryn Date: Mon, 5 Dec 2022 15:06:32 +0000 Subject: [PATCH 45/45] release: v1.5.0 --- CHANGELOG.md | 336 ++++++++++++++++++ Cargo.lock | 6 +- NEXT_CHANGELOG.md | 326 +---------------- apollo-router-benchmarks/Cargo.toml | 2 +- apollo-router-scaffold/Cargo.toml | 2 +- apollo-router/Cargo.toml | 2 +- .../tracing/docker-compose.datadog.yml | 2 +- dockerfiles/tracing/docker-compose.jaeger.yml | 2 +- dockerfiles/tracing/docker-compose.zipkin.yml | 2 +- docs/source/containerization/docker.mdx | 2 +- docs/source/containerization/kubernetes.mdx | 8 +- helm/chart/router/Chart.yaml | 4 +- helm/chart/router/README.md | 1 + scripts/install.sh | 2 +- 14 files changed, 355 insertions(+), 342 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 97058fbab9..5ae542a7e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,342 @@ All notable changes to Router will be documented in this file. This project adheres to [Semantic Versioning v2.0.0](https://semver.org/spec/v2.0.0.html). +# [1.5.0] - 2022-12-06 +## ❗ BREAKING ❗ + +### Router debug Docker images now run under the control of heaptrack ([Issue #2135](https://github.com/apollographql/router/issues/2135)) + +From 1.5.0, our debug Docker image will invoke the router under the control of heaptrack. We are making this change to make it simple for users to investigate potential memory issues with the Router. + +Do not run debug images in performance sensitive contexts. The tracking of memory allocations will significantly impact performance. In general, the debug image should only be used in consultation with Apollo engineering and support. + +Look at our documentation for examples of how to use the image in either Docker or Kubernetes. 
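+For illustration only (this sketch is not taken from the linked documentation), running the debug image under Docker Compose might look like the following. The `-debug` tag suffix and the mounted configuration path are assumptions, so check the documentation for the exact image name and required configuration:
+
+```yaml
+# Hypothetical sketch: the ':v1.5.0-debug' tag and the '/dist/config/router.yaml' mount path are assumptions.
+services:
+  apollo-router:
+    image: ghcr.io/apollographql/router:v1.5.0-debug
+    volumes:
+      - ./router.yaml:/dist/config/router.yaml
+    ports:
+      - "4000:4000"
+```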
+ +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2142 + +### Fix naming inconsistency of telemetry.metrics.common.attributes.router ([Issue #2076](https://github.com/apollographql/router/issues/2076)) + +Mirroring the rest of the config, `router` should be `supergraph`: + +```yaml +telemetry: + metrics: + common: + attributes: + router: # old +``` +becomes +```yaml +telemetry: + metrics: + common: + attributes: + supergraph: # new +``` + +By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2116 + +### CLI structure changes ([Issue #2123](https://github.com/apollographql/router/issues/2123)) + +There is now a separate subcommand for config-related operations: +* `config` + * `schema` - Output the configuration schema + * `upgrade` - Upgrade the configuration with optional diff support. + +`router --schema` has been deprecated and users should move to `router config schema`. + +By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2116 + +## 🚀 Features + +### Add configuration for trace ID ([Issue #2080](https://github.com/apollographql/router/issues/2080)) + +Trace IDs can be propagated directly from a request header: + +```yaml title="router.yaml" +telemetry: + tracing: + propagation: + # If you have your own way to generate a trace id and you want to pass it via a custom request header + request: + header_name: my-trace-id +``` +In addition, the trace ID can be exposed via a response header: +```yaml title="router.yaml" +telemetry: + tracing: + experimental_response_trace_id: + enabled: true # default: false + header_name: "my-trace-id" # default: "apollo-trace-id" +``` + +Using this configuration, you will have a response header called `my-trace-id` containing the trace ID. This can help you debug a specific query: grep your logs for that trace ID to get more context. + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2131 + +### Add configuration for logging and add more logs ([Issue #1998](https://github.com/apollographql/router/issues/1998)) + +By default, logs do not contain the request body, response body, or headers. +It is now possible to conditionally add this information for debugging and audit purposes. +Here is an example of how you can configure it: + +```yaml title="router.yaml" +telemetry: + experimental_logging: + format: json # By default it's "pretty" if you are in an interactive shell session + display_filename: true # Display filename where the log is coming from. Default: true + display_line_number: false # Display line number in the file where the log is coming from. Default: true + # If one of these headers matches we will log supergraph and subgraphs requests/responses + when_header: + - name: apollo-router-log-request + value: my_client + headers: true # default: false + body: true # default: false + # log headers for all requests/responses coming from iPhones + - name: user-agent + match: ^Mozilla/5.0 (iPhone* + headers: true +``` + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2040 + +### Provide multi-arch (amd64/arm64) Docker images for the Router ([Issue #1932](https://github.com/apollographql/router/issues/1932)) + +From 1.5.0, our Docker images will be multi-arch. 
+ +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2138 + +### Add a supergraph configmap option to the helm chart ([PR #2119](https://github.com/apollographql/router/pull/2119)) + +Adds the capability to create a configmap containing your supergraph schema. Here's an example of how you could make use of this from your values.yaml and with the `helm` install command. + +```yaml +extraEnvVars: + - name: APOLLO_ROUTER_SUPERGRAPH_PATH + value: /data/supergraph-schema.graphql + +extraVolumeMounts: + - name: supergraph-schema + mountPath: /data + readOnly: true + +extraVolumes: + - name: supergraph-schema + configMap: + name: "{{ .Release.Name }}-supergraph" + items: + - key: supergraph-schema.graphql + path: supergraph-schema.graphql +``` + +With that values.yaml content, and with your supergraph schema in a file named supergraph-schema.graphql, you can execute: + +``` +helm upgrade --install --create-namespace --namespace router-test --set-file supergraphFile=supergraph-schema.graphql router-test oci://ghcr.io/apollographql/helm-charts/router --version 1.0.0-rc.9 --values values.yaml +``` + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2119 + +### Configuration upgrades ([Issue #2123](https://github.com/apollographql/router/issues/2123)) + +Occasionally we will make changes to the Router YAML configuration format. +When starting the Router, if the configuration can be upgraded, it will do so automatically and display a warning: + +``` +2022-11-22T14:01:46.884897Z WARN router configuration contains deprecated options: + + 1. telemetry.tracing.trace_config.attributes.router has been renamed to 'supergraph' for consistency + +These will become errors in the future. Run `router config upgrade ` to see a suggested upgraded configuration. +``` + +Note: If a configuration has errors after upgrading, then the configuration will not be upgraded automatically. + +From the CLI, users can run: +* `router config upgrade ` to output configuration that has been upgraded to match the latest config format. +* `router config upgrade --diff ` to output a diff, e.g.: +``` + telemetry: + apollo: + client_name_header: apollographql-client-name + metrics: + common: + attributes: +- router: ++ supergraph: + request: + header: + - named: "1" # foo +``` + +There are situations where comments and whitespace are not preserved. + +By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2116, https://github.com/apollographql/router/pull/2162 + +### *Experimental* 🥼 subgraph request retry ([Issue #338](https://github.com/apollographql/router/issues/338), [Issue #1956](https://github.com/apollographql/router/issues/1956)) + +Implements subgraph request retries, using Finagle's retry buckets algorithm: +- it defines a minimum number of retries per second (`min_per_sec`, default is 10 retries per second), to +bootstrap the system or for low-traffic deployments +- for each successful request, we add a "token" to the bucket; those tokens expire after `ttl` (default: 10 seconds) +- the number of additional retries available is a proportion of the number of tokens, defined by `retry_percent` (default is 0.2) + +Request retries are disabled by default on mutations. 
+ +This is activated in the `traffic_shaping` plugin, either globally or per subgraph: + +```yaml +traffic_shaping: + all: + experimental_retry: + min_per_sec: 10 + ttl: 10s + retry_percent: 0.2 + retry_mutations: false + subgraphs: + accounts: + experimental_retry: + min_per_sec: 20 +``` + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2006 and https://github.com/apollographql/router/pull/2160 + +### *Experimental* 🥼 Caching configuration ([Issue #2075](https://github.com/apollographql/router/issues/2075)) + +Split Redis cache configuration for APQ and query planning: + +```yaml +supergraph: + apq: + experimental_cache: + in_memory: + limit: 512 + redis: + urls: ["redis://..."] + query_planning: + experimental_cache: + in_memory: + limit: 512 + redis: + urls: ["redis://..."] +``` + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2155 + +### `@defer` Apollo tracing support ([Issue #1600](https://github.com/apollographql/router/issues/1600)) + +Added Apollo tracing support for queries that use `@defer`. You can now view traces in Apollo Studio as normal. + +By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2190 + +## 🐛 Fixes + +### Fix panic when dev mode enabled with empty config file ([Issue #2182](https://github.com/apollographql/router/issues/2182)) + +If you're running the Router in dev mode with an empty config file, it will no longer panic. + +By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2195 + +### Fix missing apollo tracing variables ([Issue #2186](https://github.com/apollographql/router/issues/2186)) + +The `send_variable_values` setting had no effect. This is now fixed. +```yaml +telemetry: + apollo: + send_variable_values: all +``` + +By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2190 + + +### fix build_docker_image.sh script when using default repo ([PR #2163](https://github.com/apollographql/router/pull/2163)) + +Adding the `-r` flag recently broke the existing functionality to build from the default repo using `-b`. This fixes that. + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2163 + +### Improve errors when subgraph returns non-GraphQL response with a non-2xx status code ([Issue #2117](https://github.com/apollographql/router/issues/2117)) + +The error response will now contain the status code and status name. Example: `HTTP fetch failed from 'my-service': 401 Unauthorized` + +By [@col](https://github.com/col) in https://github.com/apollographql/router/pull/2118 + +### handle mutations containing `@defer` ([Issue #2099](https://github.com/apollographql/router/issues/2099)) + +The Router generates partial query shapes corresponding to the primary and deferred responses, +to validate the data sent back to the client. Those query shapes were invalid for mutations. 
+ +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2102 + +### *Experimental* 🥼 APQ and query planner Redis caching fixes ([PR #2176](https://github.com/apollographql/router/pull/2176)) + +* Use a null byte as a separator in Redis keys +* Handle Redis connection errors +* Mark APQ and query plan caching as license key functionality + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2176 + +## 🛠 Maintenance + +### Verify that deferred fragment acts as a boundary for nullability rules ([Issue #2169](https://github.com/apollographql/router/issues/2169)) + +Add a test to ensure that deferred fragments act as a boundary for nullability rules. + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2183 + +### Refactor APQ ([PR #2129](https://github.com/apollographql/router/pull/2129)) + +Remove duplicated code. + +By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2129 + +### Update apollo-rs ([PR #2177](https://github.com/apollographql/router/pull/2177)) + +Updates to new apollo-rs APIs, and fixes some potential panics on unexpected user input. + +By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/2177 + +### Semi-automate the release ([PR #2202](https://github.com/apollographql/router/pull/2202)) + +Developers can now run: +`cargo xtask release prepare minor` + +to raise a release PR. + +By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2202 + + +### Fix webpki license check ([PR #2202](https://github.com/apollographql/router/pull/2202)) + +Fixed the webpki license check. +Added the missing Google Chromium license. + +By [@o0Ignition0o](https://github.com/o0Ignition0o) and [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2202 + +## 📚 Documentation + +### Docs: Update cors match regex example ([Issue #2151](https://github.com/apollographql/router/issues/2151)) + +The docs CORS regex example now displays a working and safe way to allow `HTTPS` subdomains of `api.example.com`. + +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/2152 + + +### update documentation to reflect new examples structure ([Issue #2095](https://github.com/apollographql/router/issues/2095)) + +Updated the examples directory structure. This fixes the documentation links to the examples. It also makes clear that rhai subgraph fields are read-only, since they are shared resources. + +By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2133 + + +### Docs: Add a disclaimer for users who set up health-checks and prometheus endpoints in a container environment ([Issue #2079](https://github.com/apollographql/router/issues/2079)) + +The health check and the prometheus endpoint listen on 127.0.0.1 by default. +While this is a safe default, it prevents other pods from performing health checks and scraping prometheus data. +This behavior and customization is now documented in the [health-checks](https://www.apollographql.com/docs/router/configuration/health-checks) and the [prometheus](https://www.apollographql.com/docs/router/configuration/metrics#using-prometheus) sections.
+ +By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/2194 + + # [1.4.0] - 2022-11-15 ## 🚀 Features diff --git a/Cargo.lock b/Cargo.lock index 23f64378aa..fd006763c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -162,7 +162,7 @@ dependencies = [ [[package]] name = "apollo-router" -version = "1.4.0" +version = "1.5.0" dependencies = [ "access-json", "ansi_term", @@ -277,7 +277,7 @@ dependencies = [ [[package]] name = "apollo-router-benchmarks" -version = "1.4.0" +version = "1.5.0" dependencies = [ "apollo-router", "async-trait", @@ -293,7 +293,7 @@ dependencies = [ [[package]] name = "apollo-router-scaffold" -version = "1.4.0" +version = "1.5.0" dependencies = [ "anyhow", "cargo-scaffold", diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index eba01bf318..0ed480a744 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -13,6 +13,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## 🐛 Fixes ## 🛠 Maintenance ## 📚 Documentation +## 🥼 Experimental ## Example section entry format @@ -24,328 +25,3 @@ By [@USERNAME](https://github.com/USERNAME) in https://github.com/apollographql/ --> # [x.x.x] (unreleased) - 2022-mm-dd -## ❗ BREAKING ❗ - -### Router debug Docker images now run under the control of heaptrack ([Issue #2135](https://github.com/apollographql/router/issues/2135)) - -From the next release, our debug Docker image will invoke the router under the control of heaptrack. We are making this change to make it simple for users to investigate potential memory issues with the router. - -Do not run debug images in performance sensitive contexts. The tracking of memory allocations will significantly impact performance. In general, the debug image should only be used in consultation with Apollo engineering and support. - -Look at our documentation for examples of how to use the image in either Docker or Kubernetes. - -By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2142 - -### Fix naming inconsistency of telemetry.metrics.common.attributes.router ([Issue #2076](https://github.com/apollographql/router/issues/2076)) - -Mirroring the rest of the config `router` should be `supergraph` - -```yaml -telemetry: - metrics: - common: - attributes: - router: # old -``` -becomes -```yaml -telemetry: - metrics: - common: - attributes: - supergraph: # new -``` - -By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2116 - -### CLI structure changes ([Issue #2123](https://github.com/apollographql/router/issues/2123)) - -As the Router gains functionality the limitations of the current CLI structure are becoming apparent. - -There is now a separate subcommand for config related operations: -* `config` - * `schema` - Output the configuration schema - * `upgrade` - Upgrade the configuration with optional diff support. - -`router --schema` has been deprecated and users should move to `router config schema`. 
- -## 🚀 Features - -### Add configuration for trace ID ([Issue #2080](https://github.com/apollographql/router/issues/2080)) - -If you want to expose in response headers the generated trace ID or the one you provided using propagation headers you can use this configuration: - -```yaml title="router.yaml" -telemetry: - tracing: - experimental_response_trace_id: - enabled: true # default: false - header_name: "my-trace-id" # default: "apollo-trace-id" - propagation: - # If you have your own way to generate a trace id and you want to pass it via a custom request header - request: - header_name: my-trace-id -``` - -Using this configuration you will have a response header called `my-trace-id` containing the trace ID. It could help you to debug a specific query if you want to grep your log with this trace id to have more context. - -By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2131 - -### Add configuration for logging and add more logs - -By default some logs containing sensible data (like request body, response body, headers) are not displayed even if we set the right log level. -For example if you need to display raw responses from one of your subgraph it won't be displayed by default. To enable them you have to configure it thanks to the `when_header` setting in the new section `experimental_logging`. It let's you set different headers to enable more logs (request/response headers/body for supergraph and subgraphs) when the request contains these headers with corresponding values/regex. -Here is an example how you can configure it: - -```yaml title="router.yaml" -telemetry: - experimental_logging: - format: json # By default it's "pretty" if you are in an interactive shell session - display_filename: true # Display filename where the log is coming from. Default: true - display_line_number: false # Display line number in the file where the log is coming from. Default: true - # If one of these headers matches we will log supergraph and subgraphs requests/responses - when_header: - - name: apollo-router-log-request - value: my_client - headers: true # default: false - body: true # default: false - # log request for all requests/responses headers coming from Iphones - - name: user-agent - match: ^Mozilla/5.0 (iPhone* - headers: true -``` - -### Provide multi-arch (amd64/arm64) Docker images for the Router ([Issue #1932](https://github.com/apollographql/router/issues/1932)) - -From the next release, our Docker images will be multi-arch. - -By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2138 - -### Add a supergraph configmap option to the helm chart ([PR #2119](https://github.com/apollographql/router/pull/2119)) - -Adds the capability to create a configmap containing your supergraph schema. Here's an example of how you could make use of this from your values.yaml and with the `helm` install command. 
- -```yaml -extraEnvVars: - - name: APOLLO_ROUTER_SUPERGRAPH_PATH - value: /data/supergraph-schema.graphql - -extraVolumeMounts: - - name: supergraph-schema - mountPath: /data - readOnly: true - -extraVolumes: - - name: supergraph-schema - configMap: - name: "{{ .Release.Name }}-supergraph" - items: - - key: supergraph-schema.graphql - path: supergraph-schema.graphql -``` - -With that values.yaml content, and with your supergraph schema in a file name supergraph-schema.graphql, you can execute: - -``` -helm upgrade --install --create-namespace --namespace router-test --set-file supergraphFile=supergraph-schema.graphql router-test oci://ghcr.io/apollographql/helm-charts/router --version 1.0.0-rc.9 --values values.yaml -``` - -By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2119 - -### Configuration upgrades ([Issue #2123](https://github.com/apollographql/router/issues/2123)) - -Occasionally we will make changes to the Router yaml configuration format. -When starting the Router if the configuration can be upgraded it will do so automatically and display a warning: - -``` -2022-11-22T14:01:46.884897Z WARN router configuration contains deprecated options: - - 1. telemetry.tracing.trace_config.attributes.router has been renamed to 'supergraph' for consistency - -These will become errors in the future. Run `router config upgrade ` to see a suggested upgraded configuration. -``` - -Note: If a configuration has errors after upgrading then the configuration will not be upgraded automatically. - -From the CLI users can run: -* `router config upgrade ` to output configuration that has been upgraded to match the latest config format. -* `router config upgrade --diff ` to output a diff e.g. -``` - telemetry: - apollo: - client_name_header: apollographql-client-name - metrics: - common: - attributes: -- router: -+ supergraph: - request: - header: - - named: "1" # foo -``` - -There are situations where comments and whitespace are not preserved. This may be improved in future. - -By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2116, https://github.com/apollographql/router/pull/2162 - -### *Experimental* 🥼 subgraph request retry ([Issue #338](https://github.com/apollographql/router/issues/338), [Issue #1956](https://github.com/apollographql/router/issues/1956)) - -Implements subgraph request retries, using Finagle's retry buckets algorithm: -- it defines a minimal number of retries per second (`min_per_sec`, default is 10 retries per second), to -bootstrap the system or for low traffic deployments -- for each successful request, we add a "token" to the bucket, those tokens expire after `ttl` (default: 10 seconds) -- the number of available additional retries is a part of the number of tokens, defined by `retry_percent` (default is 0.2) - -Request retries are disabled by default on mutations. 
- -This is activated in the `traffic_shaping` plugin, either globally or per subgraph: - -```yaml -traffic_shaping: - all: - experimental_retry: - min_per_sec: 10 - ttl: 10s - retry_percent: 0.2 - retry_mutations: false - subgraphs: - accounts: - experimental_retry: - min_per_sec: 20 -``` - -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2006 and https://github.com/apollographql/router/pull/2160 - -### *Experimental* 🥼 Caching configuration ([Issue #2075](https://github.com/apollographql/router/issues/2075)) - -Split Redis cache configuration for APQ and query planning: - -```yaml -supergraph: - apq: - experimental_cache: - in_memory: - limit: 512 - redis: - urls: ["redis://..."] - query_planning: - experimental_cache: - in_memory: - limit: 512 - redis: - urls: ["redis://..."] -``` - -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2155 - -### `@defer` Apollo tracing support ([Issue #1600](https://github.com/apollographql/router/issues/1600)) - -Added Apollo tracing support for queries that use `@defer`. You can now view traces in Apollo Studio as normal. - -By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2190 - -## 🐛 Fixes - -### Fix panic when dev mode enabled with empty config file ([Issue #2182](https://github.com/apollographql/router/issues/2182)) - -If you're running the Router with dev mode with an empty config file, it will no longer panic - -By [@bnjjj](https://github.com/bnjjj) in https://github.com/apollographql/router/pull/2195 - -### Fix missing apollo tracing variables ([Issue #2186](https://github.com/apollographql/router/issues/2186)) - -Send variable values had no effect. This is now fixed. -```yaml -telemetry: - apollo: - send_variable_values: all -``` - -By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2190 - - -### fix build_docker_image.sh script when using default repo ([PR #2163](https://github.com/apollographql/router/pull/2163)) - -Adding the `-r` flag recently broke the existing functionality to build from the default repo using `-b`. This fixes that. - -By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2163 - -### Improve errors when subgraph returns non-GraphQL response with a non-2xx status code ([Issue #2117](https://github.com/apollographql/router/issues/2117)) - -The error response will now contain the status code and status name. Example: `HTTP fetch failed from 'my-service': 401 Unauthorized` - -By [@col](https://github.com/col) in https://github.com/apollographql/router/pull/2118 - -### handle mutations containing @defer ([Issue #2099](https://github.com/apollographql/router/issues/2099)) - -The Router generates partial query shapes corresponding to the primary and deferred responses, -to validate the data sent back to the client. Those query shapes were invalid for mutations. 
- -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2102 - -### *Experimental* 🥼 APQ and query planner Redis caching fixes ([PR #2176](https://github.com/apollographql/router/pull/2176)) - -* use a null byte as separator in Redis keys -* handle Redis connection errors -* mark APQ and query plan caching as license key functionality - -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2176 - -## 🛠 Maintenance - -### Verify that deferred fragment acts as a boundary for nullability rules ([Issue #2169](https://github.com/apollographql/router/issues/2169)) - -Add a test to ensure that deferred fragments act as a boundary for nullability rules. - -By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2183 - -### Refactor APQ ([PR #2129](https://github.com/apollographql/router/pull/2129)) - -Remove duplicated code. - -By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/2129 - -### Update apollo-rs ([PR #2177](https://github.com/apollographql/router/pull/2177)) - -Updates to new apollo-rs APIs, and fixes some potential panics on unexpected user input. - -By [@goto-bus-stop](https://github.com/goto-bus-stop) in https://github.com/apollographql/router/pull/2177 - -### Semi-automate the release ([PR #2202](https://github.com/apollographql/router/pull/2202)) - -Developers can now run: -`cargo xtask release prepare minor` - -To raise a release PR. - -By [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2202 - - -### Fix webpki license check ([PR #2202](https://github.com/apollographql/router/pull/2202)) - -There were issues with webpki license checks. This also meant that we were missing the Google Chromimum license which has als been fixed. -By [@o0Ignition0o](https://github.com/o0Ignition0o) [@bryncooke](https://github.com/bryncooke) in https://github.com/apollographql/router/pull/2202 - -## 📚 Documentation - -### Docs: Update cors match regex example ([Issue #2151](https://github.com/apollographql/router/issues/2151)) - -The docs CORS regex example now displays a working and safe way to allow `HTTPS` subdomains of `api.example.com`. - -By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/2152 - - -### update documentation to reflect new examples structure ([Issue #2095](https://github.com/apollographql/router/issues/2095)) - -We recently updated the examples directory structure. This fixes the documentation links to the examples. It also makes clear that rhai subgraph fields are read-only, since they are shared resources. - -By [@garypen](https://github.com/garypen) in https://github.com/apollographql/router/pull/2133 - - -### Docs: Add a disclaimer for users who set up health-checks and prometheus endpoints in a containers environment ([Issue #2079](https://github.com/apollographql/router/issues/2079)) - -The health check and the prometheus endpoint listen to 127.0.0.1 by default. -While this is a safe default, it prevents other pods from performing healthchecks and scraping prometheus data. -This behavior and customization is now documented in the [health-checks](https://www.apollographql.com/docs/router/configuration/health-checks) and the [prometheus](https://www.apollographql.com/docs/router/configuration/metrics#using-prometheus) sections. 
- -By [@o0Ignition0o](https://github.com/o0Ignition0o) in https://github.com/apollographql/router/pull/2194 diff --git a/apollo-router-benchmarks/Cargo.toml b/apollo-router-benchmarks/Cargo.toml index 6deedfdb8d..49e47e1ed2 100644 --- a/apollo-router-benchmarks/Cargo.toml +++ b/apollo-router-benchmarks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-benchmarks" -version = "1.4.0" +version = "1.5.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "LicenseRef-ELv2" diff --git a/apollo-router-scaffold/Cargo.toml b/apollo-router-scaffold/Cargo.toml index 2c9cf1af01..e8d9b9061e 100644 --- a/apollo-router-scaffold/Cargo.toml +++ b/apollo-router-scaffold/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router-scaffold" -version = "1.4.0" +version = "1.5.0" authors = ["Apollo Graph, Inc. "] edition = "2021" license = "LicenseRef-ELv2" diff --git a/apollo-router/Cargo.toml b/apollo-router/Cargo.toml index 4aa791b8cf..84f9f5569c 100644 --- a/apollo-router/Cargo.toml +++ b/apollo-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apollo-router" -version = "1.4.0" +version = "1.5.0" authors = ["Apollo Graph, Inc. "] repository = "https://github.com/apollographql/router/" documentation = "https://www.apollographql.com/docs/router/" diff --git a/dockerfiles/tracing/docker-compose.datadog.yml b/dockerfiles/tracing/docker-compose.datadog.yml index 7dd43800a4..c2fe3f7100 100644 --- a/dockerfiles/tracing/docker-compose.datadog.yml +++ b/dockerfiles/tracing/docker-compose.datadog.yml @@ -3,7 +3,7 @@ services: apollo-router: container_name: apollo-router - image: ghcr.io/apollographql/router:v1.4.0 + image: ghcr.io/apollographql/router:v1.5.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/datadog.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.jaeger.yml b/dockerfiles/tracing/docker-compose.jaeger.yml index 82cb7ee881..4ff6a379e8 100644 --- a/dockerfiles/tracing/docker-compose.jaeger.yml +++ b/dockerfiles/tracing/docker-compose.jaeger.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router #build: ./router - image: ghcr.io/apollographql/router:v1.4.0 + image: ghcr.io/apollographql/router:v1.5.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/jaeger.router.yaml:/etc/config/configuration.yaml diff --git a/dockerfiles/tracing/docker-compose.zipkin.yml b/dockerfiles/tracing/docker-compose.zipkin.yml index aee074d72c..098a9f43a3 100644 --- a/dockerfiles/tracing/docker-compose.zipkin.yml +++ b/dockerfiles/tracing/docker-compose.zipkin.yml @@ -4,7 +4,7 @@ services: apollo-router: container_name: apollo-router build: ./router - image: ghcr.io/apollographql/router:v1.4.0 + image: ghcr.io/apollographql/router:v1.5.0 volumes: - ./supergraph.graphql:/etc/config/supergraph.graphql - ./router/zipkin.router.yaml:/etc/config/configuration.yaml diff --git a/docs/source/containerization/docker.mdx b/docs/source/containerization/docker.mdx index 530246f380..9314399b5c 100644 --- a/docs/source/containerization/docker.mdx +++ b/docs/source/containerization/docker.mdx @@ -11,7 +11,7 @@ The default behaviour of the router images is suitable for a quickstart or devel Note: The [docker documentation](https://docs.docker.com/engine/reference/run/) for the run command may be helpful when reading through the examples. -Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. 
e.g.: `v1.4.0` +Note: The exact image version to use is your choice depending on which release you wish to use. In the following examples, replace `` with your chosen version. e.g.: `v1.5.0` ## Override the configuration diff --git a/docs/source/containerization/kubernetes.mdx b/docs/source/containerization/kubernetes.mdx index 3be252c9e0..537015a552 100644 --- a/docs/source/containerization/kubernetes.mdx +++ b/docs/source/containerization/kubernetes.mdx @@ -13,7 +13,7 @@ import { Link } from 'gatsby'; [Helm](https://helm.sh) is the package manager for kubernetes. -There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.4.0/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes. +There is a complete [helm chart definition](https://github.com/apollographql/router/tree/v1.5.0/helm/chart/router) in the repo which illustrates how to use helm to deploy the router in kubernetes. In both the following examples, we are using helm to install the router: - into namespace "router-deploy" (create namespace if it doesn't exist) @@ -82,7 +82,7 @@ metadata: app.kubernetes.io/version: "v1.4.0" app.kubernetes.io/managed-by: Helm data: - managedFederationApiKey: "UkVEQUNURUQ=" + managedFederationApiKey: "IlJFREFDVEVEIg==" --- # Source: router/templates/configmap.yaml apiVersion: v1 @@ -148,7 +148,7 @@ metadata: app.kubernetes.io/instance: release-name app.kubernetes.io/version: "v1.4.0" app.kubernetes.io/managed-by: Helm - + annotations: prometheus.io/path: /metrics prometheus.io/port: "9090" @@ -186,7 +186,7 @@ spec: key: managedFederationApiKey optional: true - name: APOLLO_GRAPH_REF - value: REDACTED + value: "REDACTED" ports: - name: http containerPort: 80 diff --git a/helm/chart/router/Chart.yaml b/helm/chart/router/Chart.yaml index 13da0d065c..91cd86e321 100644 --- a/helm/chart/router/Chart.yaml +++ b/helm/chart/router/Chart.yaml @@ -19,10 +19,10 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 1.0.0-rc.8 +version: 1.0.0-rc.9 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. # It is recommended to use it with quotes. -appVersion: "v1.4.0" +appVersion: "v1.5.0" diff --git a/helm/chart/router/README.md b/helm/chart/router/README.md index 201e8645e5..e07074daf9 100644 --- a/helm/chart/router/README.md +++ b/helm/chart/router/README.md @@ -80,6 +80,7 @@ helm show values oci://ghcr.io/apollographql/helm-charts/router | serviceAccount.name | string | `""` | | | serviceMonitor.enabled | bool | `false` | | | serviceentry.enabled | bool | `false` | | +| supergraphFile | string | `nil` | | | tolerations | list | `[]` | | | virtualservice.enabled | bool | `false` | | diff --git a/scripts/install.sh b/scripts/install.sh index c28759d6c5..297d3e3713 100755 --- a/scripts/install.sh +++ b/scripts/install.sh @@ -11,7 +11,7 @@ BINARY_DOWNLOAD_PREFIX="https://github.com/apollographql/router/releases/downloa # Router version defined in apollo-router's Cargo.toml # Note: Change this line manually during the release steps. -PACKAGE_VERSION="v1.4.0" +PACKAGE_VERSION="v1.5.0" download_binary() { downloader --check