From da05a64cc26c4b8f25058f28481ed3ee96e556c9 Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Fri, 23 Oct 2020 10:04:46 -0400 Subject: [PATCH 01/31] updated the release workflow to hopefully work on the 1.3.x branch --- .github/project.yaml | 1 + .github/workflows/release.yaml | 25 +++++++++++++------------ 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/.github/project.yaml b/.github/project.yaml index b95d974989..5fed5a9838 100644 --- a/.github/project.yaml +++ b/.github/project.yaml @@ -12,4 +12,5 @@ project-name: Apicurio Registry release-version: 1.3.1.Final snapshot-version: 1.3.2-SNAPSHOT +branch: 1.3.x attempt: 1 diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index b242246a22..d64558fc6a 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -1,7 +1,7 @@ name: Release Workflow on: pull_request: - branches: [master] + branches: [master, 1.3.x] types: [closed] paths: - '.github/project.yaml' @@ -53,8 +53,8 @@ jobs: git config --global user.email "apicurio.ci@gmail.com" git remote add origin "https://apicurio-ci:${{ secrets.ACCESS_TOKEN }}@github.com/$GITHUB_REPOSITORY.git" git fetch - git checkout master - git branch --set-upstream-to=origin/master + git checkout ${{steps.metadata.outputs.branch}} + git branch --set-upstream-to=origin/${{steps.metadata.outputs.branch}} git pull - name: Apicurio Website Checkout run: | @@ -65,8 +65,8 @@ jobs: git config --global user.email "apicurio.ci@gmail.com" git remote add origin "https://apicurio-ci:${{ secrets.ACCESS_TOKEN }}@github.com/Apicurio/apicurio.github.io.git" git fetch - git checkout master - git branch --set-upstream-to=origin/master + git checkout ${{steps.metadata.outputs.branch}} + git branch --set-upstream-to=origin/${{steps.metadata.outputs.branch}} git pull - name: Apicurio Playbook Checkout run: | @@ -77,8 +77,8 @@ jobs: git config --global user.email "apicurio.ci@gmail.com" git remote add origin "https://apicurio-ci:${{ 
secrets.ACCESS_TOKEN }}@github.com/Apicurio/apicurio-docs-playbook.git" git fetch - git checkout master - git branch --set-upstream-to=origin/master + git checkout ${{steps.metadata.outputs.branch}} + git branch --set-upstream-to=origin/${{steps.metadata.outputs.branch}} git pull - name: Update Release Version ${{steps.metadata.outputs.release-version}} run: | @@ -152,7 +152,7 @@ jobs: run: | cd registry mvn versions:set -DnewVersion=${{steps.metadata.outputs.snapshot-version}} -DgenerateBackupPoms=false -DprocessAllModules=true - sed -i "s/version\:\s.*/version: \'master\'/g" docs/antora.yml + sed -i "s/version\:\s.*/version: \'${{steps.metadata.outputs.branch}}\'/g" docs/antora.yml - name: Commit Snapshot Version ${{steps.metadata.outputs.snapshot-version}} run: | cd registry @@ -223,7 +223,8 @@ jobs: run: ./.github/scripts/verify-docker-release.sh apicurio/apicurio-registry-jpa:${{steps.metadata.outputs.release-version}} apicurio/apicurio-registry-jpa:latest apicurio/apicurio-registry-jpa:latest-release - name: Verify Docker Release For infinispan run: ./.github/scripts/verify-docker-release.sh apicurio/apicurio-registry-infinispan:${{steps.metadata.outputs.release-version}} apicurio/apicurio-registry-infinispan:latest apicurio/apicurio-registry-infinispan:latest-release - - name: Verify Maven Release - run: | - cd .github/test-mvn-deploy - mvn clean install "-Dversion.apicurio=${{steps.metadata.outputs.release-version}}" # Passing the latest version at run-time +# Commented out because artifacts are not immediately available in central +# - name: Verify Maven Release +# run: | +# cd .github/test-mvn-deploy +# mvn clean install "-Dversion.apicurio=${{steps.metadata.outputs.release-version}}" # Passing the latest version at run-time From 4c8bd00e8dddbaf27ed15d227e1813d1370bb9f7 Mon Sep 17 00:00:00 2001 From: Fabian Martinez <46371672+famartinrh@users.noreply.github.com> Date: Fri, 23 Oct 2020 16:23:02 +0200 Subject: [PATCH 02/31] Cherry-pick tests fixes to 1.3.x 
(#952) * force junit 5.7.0 (#4) * ui tests - fix selenium tests when run in k8s testsuite (#936) * Increase retries on all tests and increase timeouts in some UI tests (#938) --- tests/pom.xml | 5 ++ .../selenium/SeleniumChromeExtension.java | 78 +++++++++++++++++-- .../apicurio/tests/ui/DeleteArtifactIT.java | 5 +- .../registry/utils/tests/TestUtils.java | 2 +- 4 files changed, 79 insertions(+), 11 deletions(-) diff --git a/tests/pom.xml b/tests/pom.xml index 05c5fedce8..640b9fca21 100644 --- a/tests/pom.xml +++ b/tests/pom.xml @@ -23,6 +23,8 @@ ../app/target/apicurio-registry-app-${project.version}-runner + 5.7.0 + 1.7.0 1.3.0.Final 4.9.0 2.3.0 @@ -85,16 +87,19 @@ org.junit.jupiter junit-jupiter + ${junit.version} test org.junit.jupiter junit-jupiter-api + ${junit.version} provided org.junit.platform junit-platform-launcher + ${junit.platform-launcher.version} compile diff --git a/tests/src/test/java/io/apicurio/tests/selenium/SeleniumChromeExtension.java b/tests/src/test/java/io/apicurio/tests/selenium/SeleniumChromeExtension.java index f8aa5f2f4d..044e48adce 100644 --- a/tests/src/test/java/io/apicurio/tests/selenium/SeleniumChromeExtension.java +++ b/tests/src/test/java/io/apicurio/tests/selenium/SeleniumChromeExtension.java @@ -15,12 +15,19 @@ */ package io.apicurio.tests.selenium; +import java.io.IOException; +import java.net.HttpURLConnection; +import java.net.URL; + import org.junit.jupiter.api.extension.AfterAllCallback; import org.junit.jupiter.api.extension.AfterTestExecutionCallback; import org.junit.jupiter.api.extension.BeforeAllCallback; import org.junit.jupiter.api.extension.BeforeTestExecutionCallback; import org.junit.jupiter.api.extension.ExtensionContext; +import org.openqa.selenium.Capabilities; +import org.openqa.selenium.WebDriver; import org.openqa.selenium.chrome.ChromeOptions; +import org.openqa.selenium.remote.RemoteWebDriver; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testcontainers.Testcontainers; @@ -67,23 
+74,78 @@ public void beforeTestExecution(ExtensionContext extensionContext) throws Except } } - private void deployChrome() { + private void deployChrome() throws Exception { LOGGER.info("Deploying chrome browser"); - if (!TestUtils.isExternalRegistry()) { + String uiUrl; + WebDriver driver; + if (TestUtils.isExternalRegistry()) { + // we are supposing that if registry is deployed externally selenium will be as well + driver = getRemoteChromeDriver(); + String registrySeleniumHost = System.getenv().getOrDefault("REGISTRY_SELENIUM_HOST", TestUtils.getRegistryHost()); + String registrySeleniumPort = System.getenv().getOrDefault("REGISTRY_SELENIUM_PORT", Integer.toString(TestUtils.getRegistryPort())); + uiUrl = String.format("http://%s:%s/ui", registrySeleniumHost, registrySeleniumPort); + } else { Testcontainers.exposeHostPorts(TestUtils.getRegistryPort()); - } - chrome = new BrowserWebDriverContainer() + uiUrl = TestUtils.getRegistryUIUrl().replace("localhost", "host.testcontainers.internal"); + chrome = new BrowserWebDriverContainer() .withCapabilities(new ChromeOptions()); - chrome.start(); - SeleniumProvider.getInstance().setupDriver(chrome.getWebDriver()); - SeleniumProvider.getInstance().setUiUrl(TestUtils.getRegistryUIUrl().replace("localhost", "host.testcontainers.internal")); + chrome.start(); + driver = chrome.getWebDriver(); + } + SeleniumProvider.getInstance().setupDriver(driver); + SeleniumProvider.getInstance().setUiUrl(uiUrl); deployed = true; } private void deleteChrome() { SeleniumProvider.getInstance().tearDownDrivers(); LOGGER.info("Stopping chrome browser"); - chrome.stop(); + if (!TestUtils.isExternalRegistry()) { + chrome.stop(); + } deployed = false; } + + public static RemoteWebDriver getRemoteChromeDriver() throws Exception { + String seleniumHost = System.getenv().getOrDefault("SELENIUM_HOST", "localhost"); + String seleniumPort = System.getenv().getOrDefault("SELENIUM_PORT", "80"); + ChromeOptions options = new ChromeOptions(); + 
options.setAcceptInsecureCerts(true); + options.addArguments("test-type", "--headless", "--no-sandbox", "--disable-dev-shm-usage", "--disable-extensions"); + return getRemoteDriver(seleniumHost, seleniumPort, options); + } + + private static RemoteWebDriver getRemoteDriver(String host, String port, Capabilities options) throws Exception { + int attempts = 60; + URL hubUrl = new URL(String.format("http://%s:%s/wd/hub", host, port)); + LOGGER.info("Using remote selenium " + hubUrl); + for (int i = 0; i < attempts; i++) { + try { + testReachable(hubUrl); + return new RemoteWebDriver(hubUrl, options); + } catch (IOException e) { + if (i == attempts - 1) { + LOGGER.warn("Cannot connect to hub", e); + } else { + LOGGER.warn("Cannot connect to hub: {}", e.getMessage()); + } + } + Thread.sleep(2000); + } + throw new IllegalStateException("Selenium webdriver cannot connect to selenium container"); + } + + private static void testReachable(URL url) throws IOException { + LOGGER.info("Trying to connect to {}", url.toString()); + HttpURLConnection urlConnection = null; + try { + urlConnection = (HttpURLConnection) url.openConnection(); + urlConnection.getContent(); + LOGGER.info("Client is able to connect to the selenium hub"); + } finally { + if (urlConnection != null) { + urlConnection.disconnect(); + } + } + } } diff --git a/tests/src/test/java/io/apicurio/tests/ui/DeleteArtifactIT.java b/tests/src/test/java/io/apicurio/tests/ui/DeleteArtifactIT.java index 755c0e6338..8d2396b782 100644 --- a/tests/src/test/java/io/apicurio/tests/ui/DeleteArtifactIT.java +++ b/tests/src/test/java/io/apicurio/tests/ui/DeleteArtifactIT.java @@ -19,6 +19,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; +import java.time.Duration; import java.util.List; import org.junit.jupiter.api.AfterEach; @@ -73,7 +74,7 @@ void testDeleteArtifacts(RegistryService service) throws Exception { page.deleteArtifact(artifactId1); - 
TestUtils.waitFor("Artifacts list updated", Constants.POLL_INTERVAL, Constants.TIMEOUT_GLOBAL, () -> { + TestUtils.waitFor("Artifacts list updated", Constants.POLL_INTERVAL, Duration.ofSeconds(60).toMillis(), () -> { try { return page.getArtifactsList().size() == 1; } catch (Exception e) { @@ -86,7 +87,7 @@ void testDeleteArtifacts(RegistryService service) throws Exception { page.deleteArtifact(artifactId2); - TestUtils.waitFor("Artifacts list updated", Constants.POLL_INTERVAL, Constants.TIMEOUT_GLOBAL, () -> { + TestUtils.waitFor("Artifacts list updated", Constants.POLL_INTERVAL, Duration.ofSeconds(60).toMillis(), () -> { try { return page.getArtifactsList().size() == 0; } catch (Exception e) { diff --git a/utils/tests/src/main/java/io/apicurio/registry/utils/tests/TestUtils.java b/utils/tests/src/main/java/io/apicurio/registry/utils/tests/TestUtils.java index e2441d57d9..04ae7379da 100644 --- a/utils/tests/src/main/java/io/apicurio/registry/utils/tests/TestUtils.java +++ b/utils/tests/src/main/java/io/apicurio/registry/utils/tests/TestUtils.java @@ -233,7 +233,7 @@ public static void retry(RunnableExc runnable) throws Exception { } public static T retry(Callable callable) throws Exception { - return retry(callable, "Action #" + System.currentTimeMillis(), 5); + return retry(callable, "Action #" + System.currentTimeMillis(), 15); } public static void retry(RunnableExc runnable, String name, int maxRetries) throws Exception { From e8409caf8ecb370a767c91840c620e1265fbc998 Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Tue, 27 Oct 2020 10:27:37 -0400 Subject: [PATCH 03/31] Added some documentation about configuring the registry UI (#946) * Added some documentation about configuring the registry UI * renamed user interface to web console * address feedback --- docs/local-test-playbook.yml | 6 ++++ ...bly-installing-the-registry-openshift.adoc | 18 +++++----- .../proc-adding-artifacts-using-console.adoc | 18 +++++----- .../proc-configuring-registry-ui.adoc | 36 
+++++++++++++++++++ 4 files changed, 61 insertions(+), 17 deletions(-) create mode 100644 docs/modules/ROOT/partials/getting-started/proc-configuring-registry-ui.adoc diff --git a/docs/local-test-playbook.yml b/docs/local-test-playbook.yml index e769b0a7fc..78430ef69b 100644 --- a/docs/local-test-playbook.yml +++ b/docs/local-test-playbook.yml @@ -14,6 +14,12 @@ ui: url: https://raw.githubusercontent.com/Apicurio/apicurio-docs-ui/master/dist/ui-bundle.zip snapshot: true +runtime: + cache_dir: ./target/antora-cache + +output: + dir: ./target/dist + asciidoc: attributes: plantuml-server-url: 'http://www.plantuml.com/plantuml' diff --git a/docs/modules/ROOT/pages/getting-started/assembly-installing-the-registry-openshift.adoc b/docs/modules/ROOT/pages/getting-started/assembly-installing-the-registry-openshift.adoc index c542e1b664..318961b163 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-installing-the-registry-openshift.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-installing-the-registry-openshift.adoc @@ -5,7 +5,7 @@ include::{mod-loc}shared/all-attributes.adoc[] [id="installing-the-registry"] = Installing {registry} on OpenShift -This chapter explains how to first install {registry} and then how to set up your chosen registry storage option: {kafka-streams}, embedded Infinispan, or PostgreSQL database. +This chapter explains how to first install {registry} and then how to set up your chosen registry storage option: {kafka-streams}, embedded Infinispan, or PostgreSQL database. 
.Prerequisites * {registry-overview} @@ -13,31 +13,32 @@ This chapter explains how to first install {registry} and then how to set up you .{registry} installation * xref:installing-registry-operatorhub[] //* xref:installing-registry-kafka-streams-template-storage[] +* xref:configuring-registry-ui[] .{kafka-streams} storage -* xref:installing-kafka-streams-operatorhub[] +* xref:installing-kafka-streams-operatorhub[] * xref:setting-up-kafka-streams-storage[] ifdef::apicurio-registry[] -.Embedded Infinispan storage -* xref:setting-up-infinispan-storage[] +.Embedded Infinispan storage +* xref:setting-up-infinispan-storage[] .PostgreSQL database storage * xref:installing-postgresql-operatorhub[] -* xref:setting-up-postgresql-storage[] +* xref:setting-up-postgresql-storage[] endif::[] ifdef::rh-service-registry[] .Embedded Infinispan storage (Technology Preview) -* xref:setting-up-infinispan-storage[] +* xref:setting-up-infinispan-storage[] .PostgreSQL database storage (Technology Preview) * xref:installing-postgresql-operatorhub[] -* xref:setting-up-postgresql-storage[] +* xref:setting-up-postgresql-storage[] [IMPORTANT] ==== -{registry} storage in Infinispan or PostgreSQL is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. +{registry} storage in Infinispan or PostgreSQL is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. 
For more information about the support scope of Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview. ==== @@ -53,6 +54,7 @@ endif::[] //INCLUDES //include::{mod-loc}getting-started/proc_installing-registry-kafka-streams-template-storage.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-installing-registry-operatorhub.adoc[leveloffset=+1] +include::{mod-loc}getting-started/proc-configuring-registry-ui.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-installing-kafka-streams-operatorhub.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-setting-up-kafka-streams-storage.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-setting-up-infinispan-storage.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/partials/getting-started/proc-adding-artifacts-using-console.adoc b/docs/modules/ROOT/partials/getting-started/proc-adding-artifacts-using-console.adoc index bd8352f294..6393279667 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-adding-artifacts-using-console.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-adding-artifacts-using-console.adoc @@ -4,23 +4,23 @@ [id="adding-artifacts-using-console"] = Adding artifacts using the {registry} web console -You can use the {registry} web console to upload event schema and API design artifacts to the registry. For more details on the artifact types that you can upload, see {registry-artifact-types}. This section shows simple examples of uploading {registry} artifacts, applying artifact rules, and adding new artifact versions. +You can use the {registry} web console to upload event schema and API design artifacts to the registry. For more details on the artifact types that you can upload, see {registry-artifact-types}. This section shows simple examples of uploading {registry} artifacts, applying artifact rules, and adding new artifact versions. .Prerequisites -* {registry} must be installed and running in your environment. 
+* {registry} must be installed and running in your environment. .Procedure -. Connect to the {registry} web console on: +. Connect to the {registry} web console on: + `*\http://MY_REGISTRY_URL/ui*` . Click *Upload Artifact*, and specify the following: ** *ID*: Use the default empty setting to automatically generate an ID, or enter a specific artifact ID. -** *Type*: Use the default *Auto-Detect* setting to automatically detect the artifact type, or select the artifact type from the drop-down, for example, *Avro Schema* or *OpenAPI*. +** *Type*: Use the default *Auto-Detect* setting to automatically detect the artifact type, or select the artifact type from the drop-down, for example, *Avro Schema* or *OpenAPI*. + -NOTE: The {registry} server cannot automatically detect the *JSON Schema* artifact type. You must manually select this artifact type. +NOTE: The {registry} server cannot automatically detect the *Kafka Connect Schema* artifact type. You must manually select this artifact type. ** *Artifact*: Drag and drop or click *Browse* to upload a file, for example, `my-schema.json` or `my-openapi.json`. . Click *Upload* and view the *Artifact Details*: @@ -30,15 +30,15 @@ image::images/getting-started/registry-web-console-artifact.png[Artifact Details + ** *Info*: Displays the artifact name, description, lifecycle status, when created, and last modified. You can click the *Edit Artifact Metadata* pencil icon to edit the artifact name and description or add labels, and click *Download* to download the artifact file locally. Also displays artifact *Content Rules* that you can enable and configure. ** *Documentation* (OpenAPI only): Displays automatically-generated REST API documentation. -** *Content*: Displays a read-only view of the full artifact content. +** *Content*: Displays a read-only view of the full artifact content. . 
In *Content Rules*, click *Enable* to configure a *Validity Rule* or *Compatibility Rule*, and select the appropriate rule configuration from the drop-down. For more details, see {registry-rule-types}. -. Click *Upload new version* to add a new artifact version, and drag and drop or click *Browse* to upload the file, for example, `my-schema.json` or `my-openapi.json`. +. Click *Upload new version* to add a new artifact version, and drag and drop or click *Browse* to upload the file, for example, `my-schema.json` or `my-openapi.json`. -. To delete an artifact, click the trash icon next to *Upload new version*. +. To delete an artifact, click the trash icon next to *Upload new version*. + -WARNING: Deleting an artifact deletes the artifact and all of its versions, and cannot be undone. Artifact versions are immutable and cannot be deleted individually. +WARNING: Deleting an artifact deletes the artifact and all of its versions, and cannot be undone. Artifact versions are immutable and cannot be deleted individually. .Additional resources * xref:browsing-artifacts-using-console[] diff --git a/docs/modules/ROOT/partials/getting-started/proc-configuring-registry-ui.adoc b/docs/modules/ROOT/partials/getting-started/proc-configuring-registry-ui.adoc new file mode 100644 index 0000000000..dba292adf9 --- /dev/null +++ b/docs/modules/ROOT/partials/getting-started/proc-configuring-registry-ui.adoc @@ -0,0 +1,36 @@ + +[id="configuring-registry-ui"] += Configuring {registry} web console + +You can configure the {registry} web console in a number of ways, either to customize its behavior or to properly +configure it for your deployment environment. + +== Configuring {registry} web console for deployment environment + +When a user navigates their browser to the {registry} web console, some initial configuration settings are loaded. 
+Two important configuration properties are: + +* URL of the back-end API +* URL of the front-end web console + +Typically {registry} will automatically detect and generate these settings, but there are some deployment environments +where this automatic detection can fail. When this happens, you can configure the following environment variables to +explicitly set them: + +* *_REGISTRY_UI_CONFIG_APIURL_* : set to override the URL to the back-end API (example https://registry.my-domain.com/api) +* *_REGISTRY_UI_CONFIG_UIURL_* : set to override the URL to the front-end web console (example https://registry.my-domain.com/ui) + +== Configuring {registry} console for read-only mode + +An optional feature that can be enabled in {registry} is the ability to put the web console into "Read Only" +mode. This mode disables all of the features in the web console that would allow a user to make changes to +registered artifacts. This includes (but is not limited to): + +* Creating an artifact +* Uploading a new version of an artifact +* Updating an artifact's metadata +* Deleting an artifact + +To put the web console into read only mode, set the following environment variable: + +* *_REGISTRY_UI_FEATURES_READONLY_* : set to `true` to enable "Read Only" mode (default `false`) From c54c6051800199581ce4a3afee98cce0efc02619 Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Tue, 27 Oct 2020 10:29:35 -0400 Subject: [PATCH 04/31] added some documentation about artifact meta-data and custom properties (#950) * added some documentation about artifact meta-data and custom properties * address review feedback --- .../con-registry-artifact-metadata.adoc | 67 +++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 docs/modules/ROOT/partials/getting-started/con-registry-artifact-metadata.adoc diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-artifact-metadata.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-artifact-metadata.adoc new file mode 
100644 index 0000000000..cd4a9596d3 --- /dev/null +++ b/docs/modules/ROOT/partials/getting-started/con-registry-artifact-metadata.adoc @@ -0,0 +1,67 @@ +// Metadata created by nebel + +[id="registry-artifact-metadata"] += {registry} artifact metadata + +Whenever an item is added to {registry}, a set of metadata properties is stored along with the item content. This +metadata consists of a set of generated, read-only properties along with some properties that can be set by the user. + +.{registry} metadata properties +[%header,cols=3*] +|=== +|Property +|Type +|Editable +|`id` +a| string +a| false +|`type` +a| ArtifactType +a| false +|`state` +a| ArtifactState +a| true +|`version` +a| integer +a| false +|`createdBy` +a| string +a| false +|`createdOn` +a| date +a| false +|`modifiedBy` +a| string +a| false +|`modifiedOn` +a| date +a| false +|`name` +a| string +a| true +|`description` +a| string +a| true +|`labels` +a| array of string +a| true +|`properties` +a| map +a| true +|=== + +== Updating artifact metadata + +The set of editable properties can be updated via the REST API, using the metadata endpoint(s). Please see the +`/artifacts/{artifactId}/meta` section of the REST API reference for details. + +== Updating artifact state + +It is important to note that the `state` property is editable only by using the state transition API, which allows +users to, for example, mark an artifact as `deprecated` or `disabled`. Please see the `/artifacts/{artifactId}/meta` +section of the REST API reference for details. + +== Custom key-value properties + +{registry} allows users to set arbitrary key-value properties on any artifact. The `properties` property above is +the mechanism that allows this. As a result, any custom metadata properties can be stored with an artifact. 
From f6e0f797d744d681e5ec0d549d897d909da9a4fb Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Wed, 28 Oct 2020 12:37:30 -0400 Subject: [PATCH 05/31] Added avro and json schema serde documentation (#957) * Added avro and json schema serde documentation, mostly to have a place to document the new Avro encoding option * updated docs based on review feedback --- .../assembly-using-kafka-client-serdes.adoc | 12 +- .../con-registry-serdes-concepts.adoc | 31 ++- .../con-registry-serdes-strategy.adoc | 26 +- .../con-registry-serdes-types.adoc | 224 ++++++++++++++++++ .../proc-registry-serdes-config-consumer.adoc | 36 ++- .../proc-registry-serdes-config-producer.adoc | 26 +- .../proc-registry-serdes-config-stream.adoc | 52 ++++ .../proc-registry-serdes-register.adoc | 2 +- 8 files changed, 356 insertions(+), 53 deletions(-) create mode 100644 docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc create mode 100644 docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-stream.adoc diff --git a/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc b/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc index b9f0aa49a0..f4ee40170f 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc @@ -6,34 +6,38 @@ include::{mod-loc}shared/all-attributes.adoc[] = Using the Kafka client serializers/deserializers //If the assembly covers a task, start the title with a verb in the gerund form, such as Creating or Configuring. -{registry} provides Kafka client serializers/deserializers for producer and consumer applications. Kafka producer applications use serializers to encode messages that conform to a specific event schema. Kafka consumer applications use deserializers to validate that the messages have been serialized using the correct schema, based on a specific schema ID. 
This ensures consistent schema use and helps to prevent data errors at runtime. +{registry} provides Kafka client serializers/deserializers for producer and consumer applications. Kafka producer applications use serializers to encode messages that conform to a specific event schema. Kafka consumer applications use deserializers to validate that the messages have been serialized using the correct schema, based on a specific schema ID. This ensures consistent schema use and helps to prevent data errors at runtime. This chapter provides instructions on how to use the Kafka client serializer and deserializer for Apache Avro in your Kafka producer and consumer client applications: * xref:registry-serdes-concepts-serde-{context}[] +* xref:registry-serdes-types-serde-{context}[] * xref:registry-serdes-concepts-strategy-{context}[] * xref:registry-serdes-concepts-constants-{context}[] * xref:registry-serdes-register-{context}[] * xref:registry-serdes-config-consumer-{context}[] * xref:registry-serdes-config-producer-{context}[] +* xref:registry-serdes-config-stream-{context}[] .Prerequisites * You must have read {registry-overview} -* You must have installed {registry}. -* You must have created Kafka producer and consumer client applications. +* You must have installed {registry}. +* You must have created Kafka producer and consumer client applications. + ifdef::rh-service-registry[] -For more details on Kafka client applications, see link:https://access.redhat.com/documentation/en-us/red_hat_amq/{amq-version}/html/using_amq_streams_on_openshift[Using AMQ Streams on Openshift]. +For more details on Kafka client applications, see link:https://access.redhat.com/documentation/en-us/red_hat_amq/{amq-version}/html/using_amq_streams_on_openshift[Using AMQ Streams on Openshift]. 
endif::[] //INCLUDES include::{mod-loc}getting-started/con-registry-serdes-concepts.adoc[leveloffset=+1] +include::{mod-loc}getting-started/con-registry-serdes-types.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-registry-serdes-strategy.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-registry-serdes-constants.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-registry-serdes-register.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-registry-serdes-config-consumer.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-registry-serdes-config-producer.adoc[leveloffset=+1] +include::{mod-loc}getting-started/proc-registry-serdes-config-stream.adoc[leveloffset=+1] //.Additional resources (or Next steps) diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc index b10840f0fd..8d3f010541 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc @@ -9,8 +9,15 @@ For example, you can store the schemas to serialize and deserialize messages in Schemas can evolve, so you can define rules in {registry}, for example, to ensure that changes to a schema are valid and do not break previous versions used by applications. {registry} checks for compatibility by comparing a modified schema with previous schema versions. -{registry} provides full schema registry support for Avro schemas, -which are used by client applications through Kafka client serializer/deserializer (SerDe) services provided by {registry}. +{registry} provides schema registry support for a number of schema technologies such as: + +* Avro +* Protobuf +* JSON Schema + +These schema technologies can be used by client applications through Kafka client serializer/deserializer (SerDe) +services provided by {registry}. 
The maturity and usage of the SerDe classes provided by {registry} may vary. See +the type-specific sections below for more details about each. = Producer schema configuration @@ -18,17 +25,19 @@ A producer client application uses a serializer to put the messages that it send To enable a producer to use {registry} for serialization: -* xref:registry-serdes-register-{context}[Define and register your schema with {registry}] +* xref:registry-serdes-register-{context}[Define and register your schema with {registry}] (optional) * xref:registry-serdes-config-producer-{context}[Configure the producer client code]: ** URL of {registry} ** {registry} serializer to use with the messages -** _Strategy_ to look up the schema used for serialization in {registry} +** _Strategy_ to map the Kafka message to an artifact ID in {registry} +** _Strategy_ to look up (or register) the schema used for serialization in {registry} -After registering your schema, when you start Kafka and {registry}, -you can access the schema to format messages sent to the Kafka broker topic by the producer. +After registering your schema, when you start Kafka and {registry}, you can access the schema to format messages +sent to the Kafka broker topic by the producer. Alternatively (depending on configuration), the producer can +automatically register the schema on first use. -If a schema already exists, you can create a new version using the REST API based on compatibility rules defined in {registry}. Versions are used for compatibility checking as a schema evolves.An artifact ID and schema version represents a unique tuple that identifies a schema. +If a schema already exists, you can create a new version using the REST API based on compatibility rules defined in {registry}. Versions are used for compatibility checking as a schema evolves. An artifact ID and schema version represents a unique tuple that identifies a schema. 
= Consumer schema configuration A consumer client application uses a deserializer to get the messages that it consumes from a specific broker topic into the correct data format. @@ -41,8 +50,12 @@ To enable a consumer to use {registry} for deserialization: ** {registry} deserializer to use with the messages ** Input data stream for deserialization -The schema is then retrieved by the deserializer using a global ID written into the message being consumed. -The message received must, therefore, include a global ID as well as the message data. +The schema is then retrieved by the deserializer using a global ID written into the message being consumed. The schema +global ID can be located in the message headers or in the message payload itself, depending on the configuration of +the producer application. + +When locating the global ID in the message payload, the format of the data begins with a magic byte (as a signal to +consumers) followed by the global ID and then the message data as normal. For example: diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc index eb38c0edf1..0bf39f3f52 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc @@ -4,18 +4,34 @@ [id='registry-serdes-concepts-strategy-{context}'] = Strategies to lookup a schema -The Kafka client serializer/deserializer uses a lookup _strategy_ to determine the artifact ID or global ID under which the message schema is registered in {registry}. +The Kafka client serializer uses two lookup _strategies_ to determine the artifact ID and global ID under which the message schema is registered in {registry}. 
-For a given topic and message, you can use implementations of the following Java classes: +For a given topic and message, you can use implementations of the following Java interfaces: * `ArtifactIdStrategy` to return an artifact ID * `GlobalIdStrategy` to return a global ID -The artifact ID returned depends on whether the _key_ or _value_ in the message is being serialized. +.Artifact ID strategy + +The artifact ID strategy provides a way to map the Kafka topic and message information to the ID of an artifact in +{registry}. The common convention for the mapping is to combine the Kafka topic name with either `key` or `value` +(depending on whether the serializer is being used for the Kafka message key or value). However, alternative +conventions can be used for the mapping, either by using an alternative strategy provided by {registry} or by +creating a custom Java class that implements `io.apicurio.registry.utils.serde.strategy.ArtifactIdStrategy`. + +.Global ID strategy + +The global ID strategy is responsible for locating and identifying the specific *version* of the schema registered +under the artifact ID provided by the artifact ID strategy. Every version of every artifact has a single globally +unique identifier that can be used to retrieve the content of that artifact. As described in a previous section, +that global ID is what gets included in every Kafka message so that a deserializer can properly fetch the schema +from {registry}. The global ID strategy can either lookup an existing artifact version, or it can register one if +not found, depending on which strategy is used. Additionally, you can provide your own strategy by creating a +custom Java class that implements `io.apicurio.registry.utils.serde.strategy.GlobalIdStrategy`. The classes for each strategy are organized in the `io.apicurio.registry.utils.serde.strategy` package. 
-The default strategy is `TopicIdStrategy`, which looks for {registry} artifacts with the same name as the Kafka topic receiving messages. +The default artifact ID strategy is `TopicIdStrategy`, which looks for {registry} artifacts with the same name as the Kafka topic receiving messages. .Example @@ -29,7 +45,7 @@ public String artifactId(String topic, boolean isKey, T schema) { * The `topic` parameter is the name of the Kafka topic receiving the message. * The `isKey` parameter is _true_ when the message key is being serialized, and _false_ when the message value is being serialized. * The `schema` parameter is the schema of the message being serialized/deserialized. -* The `artifactID` returned is the ID under which the schema is registered in {registry}. +* The `artifactID` returned is the artifact ID under which the schema is registered in {registry}. What lookup strategy you use depends on how and where you store your schema. For example, you might use a strategy that uses a _record ID_ if you have different Kafka topics with the same Avro message type. diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc new file mode 100644 index 0000000000..f5b2d97377 --- /dev/null +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc @@ -0,0 +1,224 @@ +// Module included in the following assemblies: +// assembly-using-kafka-client-serdes + +[id='registry-serdes-types-serde-{context}'] += Configuring different SerDe types + +When using a schema technology in your Kafka applications, you must choose which specific schema type to use. Common +options include: + +* Apache Avro +* JSON Schema +* Google Protobuf + +Which schema technology you choose is dependent on use case and preference. 
Of course Kafka allows you to implement +your own custom serializer and deserializer classes, so you are always free to write your own classes, including +leveraging {registry} functionality by utilizing the REST Client (see the "Using the {registry} Java client" section). + +For your convenience, {registry} provides out of the box SerDe classes for all three of the above schema technologies. +This section of the documentation explains how to configure Kafka applications to use each type. + +Using one of the serializer or deserializer classes provided by {registry} in your Kafka application is as simple +as setting the proper configuration properties. Here are some simple examples of configuring producer and +consumer Kafka applications. + +.Configuring a producer + +[source,java,subs="+quotes,attributes"] +---- +public Producer createKafkaProducer(String kafkaBootstrapServers, String topicName) { + Properties props = new Properties(); + + // Configure standard Kafka settings + props.putIfAbsent(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBootstrapServers); + props.putIfAbsent(ProducerConfig.CLIENT_ID_CONFIG, "Producer-" + topicName); + props.putIfAbsent(ProducerConfig.ACKS_CONFIG, "all"); + + // Use a {registry} provided Kafka Serializer + props.putIfAbsent(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, + io.apicurio.registry.utils.serde.AvroKafkaSerializer.class.getName()); + props.putIfAbsent(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, + io.apicurio.registry.utils.serde.AvroKafkaSerializer.class.getName()); + + // Configure {registry} location + props.putIfAbsent(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, REGISTRY_URL); + // Map the topic name (plus -key/value) to the artifactId in the registry + props.putIfAbsent(AbstractKafkaSerializer.REGISTRY_ARTIFACT_ID_STRATEGY_CONFIG_PARAM, + io.apicurio.registry.utils.serde.strategy.TopicIdStrategy.class.getName()); + // Get an existing schema or auto-register if not found + 
props.putIfAbsent(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, + io.apicurio.registry.utils.serde.strategy.GetOrCreateIdStrategy.class.getName()); + + // Create the Kafka producer + Producer producer = new KafkaProducer<>(props); + return producer; +} +---- + +.Configuring a consumer + +[source,java,subs="+quotes,attributes"] +---- +public Consumer createKafkaConsumer(String kafkaBootstrapServers, String topicName) { + Properties props = new Properties(); + + // Configure standard Kafka settings + props.putIfAbsent(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBootstrapServers); + props.putIfAbsent(ConsumerConfig.GROUP_ID_CONFIG, "Consumer-" + topicName); + props.putIfAbsent(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); + props.putIfAbsent(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000"); + props.putIfAbsent(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + + // Use a {registry} provided Kafka Deserializer + props.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, + io.apicurio.registry.utils.serde.AvroKafkaDeserializer.class.getName()); + props.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, + io.apicurio.registry.utils.serde.AvroKafkaDeserializer.class.getName()); + + // Configure {registry} location + props.putIfAbsent(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, REGISTRY_URL); + // No other configuration needed for the deserializer, because the globalId of the schema + // the deserializer should use is sent as part of the message. So the deserializer simply + // extracts that globalId and uses it to look up the schema from the registry. + + // Create the Kafka Consumer + KafkaConsumer consumer = new KafkaConsumer<>(props); + return consumer; +} +---- + + +== Using Avro SerDe with {registry} + +{registry} provides serializer and deserializer classes for Apache Avro out of the box, to make using Avro as +easy as possible.
These classes are: + +* `io.apicurio.registry.utils.serde.AvroKafkaSerializer` +* `io.apicurio.registry.utils.serde.AvroKafkaDeserializer` + +=== Configuring the Avro serializer + +You can configure the Avro serializer class in the following ways: + +* {registry} location as a URL +* Artifact ID strategy (see the "Strategies to lookup a schema" section) +* Global ID strategy (see the "Strategies to lookup a schema" section) +* Global ID location +* Global ID handler +* Avro datum provider +* Avro encoding + +==== *Global ID location* + +The serializer is responsible for passing the unique global ID of the schema as part of the Kafka message so that +consumers can use the right schema for deserialization. The location of that global ID can be in the payload of +the message or in the message headers. The default approach is to pass the global ID in the message payload. If +you want the ID sent in the message headers instead, you can set the following configuration property: + +`props.putIfAbsent(AbstractKafkaSerDe.USE_HEADERS, "true")` + +The property name is `apicurio.registry.use.headers`. + + +==== *Global ID handler* + +You can customize precisely how the global ID is encoded when passing it in the Kafka message body. Set +the configuration property `apicurio.registry.id-handler` to be a class that implements the +`io.apicurio.registry.utils.serde.strategy.IdHandler` interface. {registry} provides two implementations of +that interface: + +* `io.apicurio.registry.utils.serde.strategy.DefaultIdHandler` - stores the ID as an 8 byte long +* `io.apicurio.registry.utils.serde.strategy.Legacy4ByteIdHandler` - stores the ID as a 4 byte int + +{registry} represents the global ID of an artifact as a long, but for legacy reasons (or for compatibility with +other registries or serde classes) you may want to use 4 bytes when sending the ID.
+ +==== *Avro datum provider* +TBD + +==== *Avro encoding* + +When using Apache Avro to serialize data, it is common to use the Avro binary encoding format. This is so that +the data is encoded in as efficient a format as possible. However, Avro also supports encoding the data as JSON. +Encoding as JSON is useful because it is much easier to inspect the payload of each message, often for logging, +debugging, or other similar use cases. The {registry} Avro serializer can be configured to change the encoding +to JSON from the default (binary). + +Set the Avro encoding to use by configuring the `apicurio.avro.encoding` property. The value must be either +`JSON` or `BINARY`. + + +=== Configuring the Avro deserializer + +You must configure the Avro deserializer class to match the configuration settings of the serializer. As a +result, you can configure the Avro deserializer class in the following ways: + +* {registry} location as a URL +* Global ID handler +* Avro datum provider +* Avro encoding + +See the serializer documentation for the above configuration options - the property names and values are the same. + +NOTE: The following options are *not* needed when configuring the deserializer: + +* Artifact ID strategy +* Global ID strategy +* Global ID location + +The reason these options are not necessary is that the deserializer class can figure this information out from +the message itself. In the case of the two strategies, they are not needed because the serializer is responsible +for sending the global ID of the schema as part of the message. And the location of that global ID is determined +(by the deserializer) by simply checking for the magic byte at the start of the message payload. If that byte is +found then the global ID is read from the message payload (using the configured handler). If the magic byte is +not found, then the global ID is read from the message headers.
+ +== Using JSON Schema SerDe with {registry} + +{registry} provides serializer and deserializer classes for JSON Schema out of the box, to make using JSON Schema as +easy as possible. These classes are: + +* `io.apicurio.registry.utils.serde.JsonSchemaKafkaSerializer` +* `io.apicurio.registry.utils.serde.JsonSchemaKafkaDeserializer` + +Unlike Apache Avro, JSON Schema is not actually a serialization technology - it is instead a validation +technology. As a result, configuration options for JSON Schema are quite different. For example, there is no +*encoding* option, since data is always encoded as JSON. + +=== Configuring the JSON Schema serializer + +You can configure the JSON Schema serializer class in the following ways: + +* {registry} location as a URL +* Artifact ID strategy (see the "Strategies to lookup a schema" section) +* Global ID strategy (see the "Strategies to lookup a schema" section) +* Validation enabled/disabled + +As you can see, the only non-standard configuration property is whether JSON Schema validation is enabled or +disabled. The validation feature is disabled by default but can be enabled by setting +`apicurio.registry.serdes.json-schema.validation-enabled` to `"true"`. For example: + +`props.putIfAbsent(JsonSchemaSerDeConstants.REGISTRY_JSON_SCHEMA_VALIDATION_ENABLED, "true")` + +=== Configuring the JSON Schema deserializer + +You can configure the JSON Schema deserializer class in the following ways: + +* {registry} location as a URL +* Validation enabled/disabled + +As you can see, the deserializer is very simple to configure. You need to provide the location of {registry} so +that the schema can be loaded. The only other configuration is whether or not to perform validation. These +configuration properties are the same as for the serializer. + +NOTE: Deserializer validation will only work if the serializer passes the global ID in the Kafka message, which +will only happen when validation is enabled in the serializer. 
+ +== Using Protobuf SerDe with {registry} +TBD + +=== Configuring the Protobuf serializer +TBD + +=== Configuring the Protobuf deserializer +TBD diff --git a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc index fa201ef902..19fb7402a3 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc @@ -19,30 +19,28 @@ For example: + [source,shell,subs="+quotes,attributes"] ---- -String registryUrl_node1 = PropertiesUtil.property(clientProperties, "registry.url.node1", - "https://my-cluster-service-registry-myproject.example.com/api"); -RegistryService service = RegistryClient.cached(registryUrl); +String registryUrl = "https://registry.example.com/api"; +Properties props = new Properties(); +props.putIfAbsent(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, registryUrl); <1> ---- -. Configure the client with the {registry} deserializer service. +. Configure the client with the {registry} deserializer. 
+ For example: + [source,java,subs="+quotes,attributes"] ---- -Deserializer deserializer = new AvroKafkaDeserializer <> ( <1> - service, - new DefaultAvroDatumProvider().setUseSpecificAvroReader(true) -); -Serde logSerde = Serdes.serdeFrom( <2> - new AvroKafkaSerializer<>(service), - deserializer -); -KStream input = builder.stream( <3> - INPUT_TOPIC, - Consumed.with(Serdes.String(), logSerde) -); +// Configure Kafka +props.putIfAbsent(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, SERVERS); +props.putIfAbsent(ConsumerConfig.GROUP_ID_CONFIG, "Consumer-" + TOPIC_NAME); +props.putIfAbsent(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); +props.putIfAbsent(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000"); +props.putIfAbsent(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); +props.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, + AvroKafkaDeserializer.class.getName()); <2> <3> +props.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, + AvroKafkaDeserializer.class.getName()); <2> <3> ---- -<1> The deserializer service provided by {registry}. -<2> The deserialization is in _Apache Avro_ JSON format. -<3> The input data for deserialization derived from the topic values consumed by the client. +<1> The {registry} URL. +<2> The deserializer provided by {registry}. +<3> The deserialization is in _Apache Avro_ JSON format. 
diff --git a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc index 651c867c34..e82fb0237b 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc @@ -19,27 +19,23 @@ For example: + [source,java,subs="+quotes,attributes"] ---- -String registryUrl_node1 = PropertiesUtil.property(clientProperties, "registry.url.node1", - "https://my-cluster-service-registry-myproject.example.com/api"); -RegistryService service = RegistryClient.cached(registryUrl); +String registryUrl = "https://registry.example.com/api"; +Properties props = new Properties(); +props.putIfAbsent(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, registryUrl); <1> ---- -. Configure the client with the serializer services, and the strategy to look up the schema in {registry}. +. Configure the client with the serializer, and the strategy to look up the schema in {registry}.
+ For example: + [source,java,subs="+quotes,attributes"] ---- -String registryUrl_node1 = PropertiesUtil.property(clientProperties, "registry.url.node1", - "https://my-cluster-service-registry-myproject.example.com/api"); - - clientProperties.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, property(clientProperties, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "my-cluster-kafka-bootstrap:9092")); - clientProperties.put(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, registryUrl_node1); <1> - clientProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); <2> - clientProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, AvroKafkaSerializer.class.getName()); <3> - clientProperties.put(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, FindLatestIdStrategy.class.getName()); <4> +props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "my-cluster-kafka-bootstrap:9092"); +props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, AvroKafkaSerializer.class.getName()); <2> +props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, AvroKafkaSerializer.class.getName()); <3> +props.put(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, FindLatestIdStrategy.class.getName()); <4> ---- <1> The {registry} URL. -<2> The serializer service for the message _key_ provided by {registry}. -<3> The serializer service for the message _value_ provided by {registry}. -<4> Lookup strategy to find the global ID for the schema. Matches the schema of the message against its global ID (artifact ID and schema version) in {registry}. +<2> The serializer for the message _key_ provided by {registry}. +<3> The serializer for the message _value_ provided by {registry}. +<4> Lookup strategy to find the global ID for the schema. 
diff --git a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-stream.adoc b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-stream.adoc new file mode 100644 index 0000000000..8c433f1267 --- /dev/null +++ b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-stream.adoc @@ -0,0 +1,52 @@ +// Module included in the following assemblies: +// assembly-using-kafka-client-serdes + +[id='registry-serdes-config-stream-{context}'] += Using a schema from a Kafka Streams application + +This procedure describes how to configure a Kafka Streams client written in Java to use a schema from {registry}. + +.Prerequisites + +* {registry} is installed +* The schema is registered with {registry} + +.Procedure + +. Create and configure a REST client with the {registry} ++ +For example: ++ +[source,shell,subs="+quotes,attributes"] +---- +String registryUrl = "https://registry.example.com/api"; +RegistryService client = RegistryClient.cached(registryUrl); +---- + +. Configure the serializer, deserializer, and create the Kafka Streams client ++ +For example: ++ +[source,java,subs="+quotes,attributes"] +---- +Serializer serializer = new AvroKafkaSerializer<>( <1> + client, + new DefaultAvroDatumProvider().setUseSpecificAvroReader(true) +); +Deserializer deserializer = new AvroKafkaDeserializer <> ( <2> + client, + new DefaultAvroDatumProvider().setUseSpecificAvroReader(true) +); +Serde logSerde = Serdes.serdeFrom( <3> + serializer, + deserializer +); +KStream input = builder.stream( <4> + INPUT_TOPIC, + Consumed.with(Serdes.String(), logSerde) +); +---- +<1> The serializer provided by {registry}. +<2> The deserializer provided by {registry}. +<3> The deserialization is in _Apache Avro_ format. +<4> The Kafka Streams client application. 
diff --git a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-register.adoc b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-register.adoc index 721c576cae..f786b7c776 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-register.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-register.adoc @@ -2,7 +2,7 @@ // assembly-using-kafka-client-serdes [id='registry-serdes-register-{context}'] -= Registering a schema to {registry} += Registering a schema in {registry} After you have defined a schema in the appropriate format, such as _Apache Avro_, you can add the schema to {registry}. From fb80f345ef35bdec78ef7c8fde274c26f988ad31 Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Wed, 28 Oct 2020 13:03:45 -0400 Subject: [PATCH 06/31] Update antora.yml --- docs/antora.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/antora.yml b/docs/antora.yml index e3a18ad430..f17826ba24 100644 --- a/docs/antora.yml +++ b/docs/antora.yml @@ -1,6 +1,6 @@ name: apicurio-registry title: Apicurio Registry -version: 'master' +version: '1.3.1.Final' start_ROOT: ROOT:index.adoc nav: - modules/ROOT/nav.adoc From 8bae786f8aa7b8882f2ba353d9ba2745ef9fc631 Mon Sep 17 00:00:00 2001 From: Carles Arnal Date: Thu, 29 Oct 2020 19:55:52 +0100 Subject: [PATCH 07/31] Add registry client documentation (#955) * Add registry client documentation * Improve custom header docs * Update con-registry-client.adoc * Update ref-registry-client.adoc * Update proc-writing-registry-client.adoc * Improve rest client docs * Address comments Co-authored-by: Eric Wittmann --- .../getting-started/con-registry-client.adoc | 20 ++++++++- .../proc-writing-registry-client.adoc | 43 ++++++++++++++----- .../getting-started/ref-registry-client.adoc | 26 +++++++---- 3 files changed, 68 insertions(+), 21 deletions(-) diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-client.adoc 
b/docs/modules/ROOT/partials/getting-started/con-registry-client.adoc index 750c72849e..538e7275e3 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-client.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-client.adoc @@ -3,6 +3,22 @@ [id="registry-client-intro"] = {registry} Java client overview -You can manage artifacts stored in {registry} using a Java client application. You can create, read, update, or delete artifacts stored in the registry using the {registry} Java client classes... +You can manage artifacts stored in {registry} using a Java client application. You can create, read, update, or delete artifacts stored +in the registry using the {registry} Java client classes. -More TBD +You can access the {registry} Java client by adding the proper dependency to your project, see xref:writing-registry-client[]. + +The {registry} client is autocloseable and is implemented using Retrofit and OkHttp as base libraries. This gives the user the ability to customize its +usage by, for example, adding custom headers or enabling TLS auth support. 
+ +== Enabling TLS support in the client + +You can configure TLS authentication using the following properties: + +* apicurio.registry.request.ssl.truststore.location +* apicurio.registry.request.ssl.truststore.password +* apicurio.registry.request.ssl.truststore.type +* apicurio.registry.request.ssl.keystore.location +* apicurio.registry.request.ssl.keystore.password +* apicurio.registry.request.ssl.keystore.type +* apicurio.registry.request.ssl.key.password diff --git a/docs/modules/ROOT/partials/getting-started/proc-writing-registry-client.adoc b/docs/modules/ROOT/partials/getting-started/proc-writing-registry-client.adoc index f6c61def2c..4254a03c55 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-writing-registry-client.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-writing-registry-client.adoc @@ -4,28 +4,49 @@ [id="writing-registry-client"] = Writing a {registry} client application -This section explains how to manage artifacts stored in {registry} using a Java client application... - -More TBD +This section explains how to manage artifacts stored in {registry} using a Java client application. .Prerequisites - * See {registry-overview} -* {registry} must be installed and running in your environment +* {registry} must be installed and running in your environment. +* Add the following dependency to your Maven project: -.Procedure +[source,xml,subs="+quotes,attributes"] +---- + + io.apicurio + apicurio-registry-rest-client + ${apicurio-registry.version} + +---- -. Enter step one here. +.Procedure +. Create a registry client + [source,java,subs="+quotes,attributes"] ---- -Insert client code sample here <1> -... 
+public class ClientExample { + + private static final RegistryRestClient client; + + public static void main(String[] args) throws Exception { + // Create a Service Registry client + String registryUrl = "http://localhost:8080/api/"; + RegistryRestClient client = RegistryRestClientFactory.create(registryUrl); <1> + } +} ---- -<1> Describe the client code sample here +<1> For more options on how to create a {registry} client, see {registry-client-types}. + +. Once created, all the operations from the {registry} REST API are available through the client. For details about the available +operations, see the REST API documentation. + + +The {registry} Java client extends the interface `AutoCloseable`. .Additional resources -* For an example Java client application, see https://github.com/Apicurio/apicurio-registry-demo. +* For more examples on how to use or customize the {registry} client see https://github.com/Apicurio/apicurio-registry-examples/blob/master/rest-client + ifdef::rh-service-registry[] * For details on how to use the {registry} Kafka client serializer/deserializer for Apache Avro in AMQ Streams producer and consumer applications, see link:https://access.redhat.com/documentation/en-us/red_hat_amq/{amq-version}/html/using_amq_streams_on_openshift/service-registry-str[Using AMQ Streams on Openshift]. diff --git a/docs/modules/ROOT/partials/getting-started/ref-registry-client.adoc b/docs/modules/ROOT/partials/getting-started/ref-registry-client.adoc index 881361d900..4109ffc7f7 100644 --- a/docs/modules/ROOT/partials/getting-started/ref-registry-client.adoc +++ b/docs/modules/ROOT/partials/getting-started/ref-registry-client.adoc @@ -3,15 +3,25 @@ [id="registry-client-types"] = {registry} Java client reference -The {registry} Java client includes the following... +The {registry} Java client includes the following configuration options, based on the client factory.
-More TBD - -.{registry} Java client types -[%header,cols=2*] +.{registry} Java client options +[%header,cols=3*] |=== -|Type +|Option |Description -|`TBD` -| TBD +|Arguments +|Plain Client +|Basic REST client used to interact with a running registry. +|baseUrl +|Custom HTTP client +|Registry client using an OkHttpClient provided by the user. +|baseUrl, okhttpClient +|Custom Configuration +|Registry client that accepts a map containing custom configuration. This is useful, for example, to add custom headers to the calls. +|baseUrl, Map configs |=== + + +In order to configure custom headers, the prefix *apicurio.registry.request.headers* must be added to the configs map key, for example, a key *apicurio.registry.request.headers.Authorization* with value Basic: xxxxx would result in a header of *Authorization* with value Basic: xxxxx. + From 35b2dd1bb3453da51894e6133569c2d839ce2bf8 Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Thu, 29 Oct 2020 15:14:04 -0400 Subject: [PATCH 08/31] Various maven related documentation changes (#964) * re-organized the maven plugin docs a bit and added some info about "test-update" and file extensions * incorporate feedback from review --- docs/modules/ROOT/nav.adoc | 2 + ...embly-managing-registry-artifacts-api.adoc | 4 +- ...bly-managing-registry-artifacts-maven.adoc | 13 +++ ...managing-artifacts-using-maven-plugin.adoc | 108 ++++++++++++------ 4 files changed, 92 insertions(+), 35 deletions(-) create mode 100644 docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc diff --git a/docs/modules/ROOT/nav.adoc b/docs/modules/ROOT/nav.adoc index 940c2f28e2..575808a7eb 100644 --- a/docs/modules/ROOT/nav.adoc +++ b/docs/modules/ROOT/nav.adoc @@ -6,6 +6,8 @@ endif:getting-started/:[] * xref:getting-started/assembly-installing-the-registry-openshift.adoc[] * xref:getting-started/assembly-configuring-the-registry.adoc[] * xref:getting-started/assembly-managing-registry-artifacts-ui.adoc[] +* 
xref:getting-started/assembly-managing-registry-artifacts-api.adoc[] +* xref:getting-started/assembly-managing-registry-artifacts-maven.adoc[] * xref:getting-started/assembly-using-kafka-client-serdes.adoc[] * xref:getting-started/assembly-using-the-registry-client.adoc[] * xref:getting-started/assembly-registry-reference.adoc[] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc index f3e550137c..bc26b6b59d 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc @@ -3,16 +3,14 @@ include::{mod-loc}shared/all-attributes.adoc[] [id="managing-registry-artifacts-api"] -= Managing {registry} content using the REST API += Managing {registry} content using the REST API //If the assembly covers a task, start the title with a verb in the gerund form, such as Creating or Configuring. This chapter explains how to manage artifacts stored in the registry using the Registry REST API. 
This includes using Registry REST API commands, a Maven plug-in, or a Java client application: * xref:managing-artifacts-using-rest-api[] -* xref:managing-artifacts-using-maven-plugin[] * xref:managing-artifacts-using-client-code[] //INCLUDES include::{mod-loc}getting-started/proc-managing-artifacts-using-rest-api.adoc[leveloffset=+1] -include::{mod-loc}getting-started/proc-managing-artifacts-using-maven-plugin.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-managing-artifacts-using-client-code.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc new file mode 100644 index 0000000000..7a7425f40c --- /dev/null +++ b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc @@ -0,0 +1,13 @@ +// Metadata created by nebel + +include::{mod-loc}shared/all-attributes.adoc[] + +[id="managing-registry-artifacts-maven"] += Managing {registry} content using the Maven plug-in + +This chapter explains how to manage artifacts stored in the registry using the {registry} Maven plug-in. 
+ +* xref:managing-artifacts-using-maven-plugin[] + +//INCLUDES +include::{mod-loc}getting-started/proc-managing-artifacts-using-maven-plugin.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/partials/getting-started/proc-managing-artifacts-using-maven-plugin.adoc b/docs/modules/ROOT/partials/getting-started/proc-managing-artifacts-using-maven-plugin.adoc index 0bb895aca4..c17da1ddee 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-managing-artifacts-using-maven-plugin.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-managing-artifacts-using-maven-plugin.adoc @@ -12,67 +12,111 @@ * {registry} must be installed and running in your environment * Maven must be installed and configured in your environment -.Procedure +== Registering an artifact using the Maven plug-in + +Probably the most common use case for the Maven plug-in is registering artifacts during a build. You can accomplish +this by using the `register` goal provided. Simply update your Maven `pom.xml` file to use the +`apicurio-registry-maven-plugin` to upload an artifact to {registry}. + +The following example shows registering an Apache Avro schema artifact: -. Update your Maven `pom.xml` file to use the `apicurio-registry-maven-plugin` to upload an artifact to {registry}. The following example shows registering an Apache Avro schema artifact: -+ [source,xml] ---- -io.apicurio -apicurio-registry-maven-plugin -${registry.version} - - - generate-sources - - register <1> - - - http://my-cluster-service-registry-myproject.example.com/api <2> - AVRO - - ${project.basedir}/schemas/schema1.avsc <3> - - - - + io.apicurio + apicurio-registry-maven-plugin + ${registry.version} + + + generate-sources + + register <1> + + + http://my-cluster-service-registry-myproject.example.com/api <2> + AVRO + + ${project.basedir}/schemas/schema1.avsc <3> + + + + ---- -+ <1> Specify `register` as the execution goal to upload an artifact to the registry. 
<2> You must specify the {registry} URL with the `/api` endpoint. -<3> You can upload multiple artifacts using the artifact ID and location. +<3> You can upload multiple artifacts using the artifact ID and location. + +== Downloading an artifact using the Maven plug-in +You can also use the Maven plug-in to download artifacts from {registry}. This is often useful, for example, when +generating code from a registered schema. + +The following example shows downloading a single schema by its artifact ID. -. You can also update your Maven `pom.xml` file to download a previously registered artifact from {registry}: -+ [source,xml] ---- io.apicurio apicurio-registry-maven-plugin ${registry.version} - + - generate-sources + generate-sources download <1> http://my-cluster-service-registry-myproject.example.com/api <2> - - schema1 <3> - + + schema1 <3> + + .avsc <4> ${project.build.directory} - + ---- -+ <1> Specify `download` as the execution goal. <2> You must specify the {registry} URL with the `/api` endpoint. <3> You can download multiple artifacts to a specified directory using the artifact ID. +<4> The plug-in will automatically try to select an appropriate file extension, but you can override it using ``. + +== Testing an artifact +You may want to simply verify that an artifact can be registered without actually making any changes. This is most +often useful when rules have been configured in {registry}. Testing the artifact will result in a failure if the +artifact content violates any of the configured rules. + +NOTE: Even if the artifact passes the test, no content will be added to {registry}. 
+ +The following example shows testing an Apache Avro schema artifact: + +[source,xml] +---- + + io.apicurio + apicurio-registry-maven-plugin + ${registry.version} + + + generate-sources + + test-update <1> + + + http://my-cluster-service-registry-myproject.example.com/api <2> + AVRO + + ${project.basedir}/schemas/schema1.avsc <3> + + + + + +---- +<1> Specify `test-update` as the execution goal to test an artifact. +<2> You must specify the {registry} URL with the `/api` endpoint. +<3> You can test multiple artifacts using the artifact ID and location. .Additional resources * For more details on the Maven plug-in, see https://github.com/Apicurio/apicurio-registry-demo. From 3df067b2a5d3fc42ade5bdfe3fc5240b6b9b7ecf Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Fri, 30 Oct 2020 13:15:43 -0400 Subject: [PATCH 09/31] documented how to override the topic names for kafka storage (#968) * documented how to override the topic names for kafka storage * changes based on review feedback --- ...proc-setting-up-kafka-streams-storage.adoc | 38 +++++++++++++++---- .../ROOT/partials/shared/attributes.adoc | 8 ++-- 2 files changed, 34 insertions(+), 12 deletions(-) diff --git a/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc b/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc index 06ce3ec738..56f3def9c4 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc @@ -25,11 +25,11 @@ ifdef::rh-service-registry[] .. Click *Installed Operators* > *Red Hat Integration - {kafka-streams}*. endif::[] .. Under *Provided APIs* > *Kafka*, click *Create Instance* to create a new Kafka cluster. -.. Edit the custom resource definition as appropriate, and click *Create*. +.. Edit the custom resource definition as appropriate, and click *Create*. 
+ WARNING: The default example creates a cluster with 3 Zookeeper nodes and 3 Kafka nodes with `ephemeral` storage. This temporary storage is suitable for development and testing only, and not for production. For more details, see link:https://access.redhat.com/documentation/en-us/red_hat_amq/{amq-version}/html/using_amq_streams_on_openshift/index?[Using AMQ Streams on OpenShift]. -. After the cluster is ready, click *Provided APIs* > *Kafka* > *my-cluster* > *YAML*. +. After the cluster is ready, click *Provided APIs* > *Kafka* > *my-cluster* > *YAML*. . In the `status` block, make a copy of the `bootstrapServers` value, which you will use later to deploy {registry}. For example: + @@ -49,19 +49,19 @@ status: . Create a Kafka topic to store the {registry} artifacts: + -.. Under *Provided APIs* > *Kafka Topic*, click *Create topic*. +.. Under *Provided APIs* > *Kafka Topic*, click *Create topic*. .. Change the default topic name from `my-topic` to the required `storage-topic`. . Create a Kafka topic to store the {registry} global IDs: .. Under *Provided APIs* > *Kafka Topic*, click *Create topic*. .. Change the default topic name from `my-topic` to the required `global-id-topic`. ifdef::apicurio-registry[] -. Click *Installed Operators* > *{registry}* > *ApicurioRegistry* > *Create ApicurioRegistry*. +. Click *Installed Operators* > *{registry}* > *ApicurioRegistry* > *Create ApicurioRegistry*. endif::[] ifdef::rh-service-registry[] -. Click *Installed Operators* > *Red Hat Integration - {registry}* > *ApicurioRegistry* > *Create ApicurioRegistry*. +. Click *Installed Operators* > *Red Hat Integration - {registry}* > *ApicurioRegistry* > *Create ApicurioRegistry*. endif::[] -. Paste in the following custom resource definition, but use your `bootstrapServers` value that you copied earlier: +. 
Paste in the following custom resource definition, but use your `bootstrapServers` value that you copied earlier: + [source,yaml] ---- @@ -75,16 +75,38 @@ spec: streams: bootstrapServers: "my-cluster-kafka-bootstrap.my-project.svc:9092" ---- - + . Click *Create* and wait for the {registry} route to be created on OpenShift. . Click *Networking* > *Route* to access the new route for the {registry} web console. For example: + [source] ---- -http://example-apicurioregistry.my-project.my-domain-name.com/ +http://example-apicurioregistry.my-project.my-domain-name.com/ ---- +.Overriding default Kafka topic names +You can change the default names of the Kafka topics that {registry} will use to store data in Kafka. It is sometimes +necessary to do this when sharing the Kafka cluster with other applications that may already be using topics named +`storage-topic` or `global-id-topic`. + +Change the default topic names by overriding them either by setting appropriate environment variables or by +setting appropriate Java system properties: + +[%header,cols=3*] +|=== +|Topic Default +|Environment Variable +|System Property +|`storage-topic` +a| `REGISTRY_STREAMS_TOPOLOGY_STORAGE_TOPIC` +a| `registry.streams.topology.storage.topic` +|`global-id-topic` +a| `REGISTRY_STREAMS_TOPOLOGY_GLOBAL_ID_TOPIC` +a| `registry.streams.topology.global.id.topic` +|=== + + .Additional resources ifdef::apicurio-registry[] diff --git a/docs/modules/ROOT/partials/shared/attributes.adoc b/docs/modules/ROOT/partials/shared/attributes.adoc index cb8ea5133d..8ab4fc8f3d 100644 --- a/docs/modules/ROOT/partials/shared/attributes.adoc +++ b/docs/modules/ROOT/partials/shared/attributes.adoc @@ -19,9 +19,9 @@ :apicurio-registry: :registry: Apicurio Registry :kafka-streams: Strimzi -:registry-version: 1.2 +:registry-version: 1.3 -// downstream +// downstream //:rh-service-registry: //:registry: Service Registry //:ServiceRegistryName: Apicurio Registry @@ -40,7 +40,7 @@ :copy: © :infin: ∞ :mdash: — -:nbsp: 
+:nbsp: :ndash: – :reg: ® :trade: ™ @@ -51,7 +51,7 @@ include::attributes-links.adoc[] // Download URLs :download-url-registry-container-catalog: https://catalog.redhat.com/software/containers/search :download-url-registry-maven: https://access.redhat.com/jbossnetwork/restricted/listSoftware.html?downloadType=distributions&product=red.hat.integration&version=2020-Q2 -:download-url-registry-source-code: https://access.redhat.com/jbossnetwork/restricted/listSoftware.html?downloadType=distributions&product=red.hat.integration&version=2020-Q2 +:download-url-registry-source-code: https://access.redhat.com/jbossnetwork/restricted/listSoftware.html?downloadType=distributions&product=red.hat.integration&version=2020-Q2 :download-url-registry-kafka-connect: https://access.redhat.com/jbossnetwork/restricted/listSoftware.html?downloadType=distributions&product=red.hat.integration&version=2020-Q2 :download-url-registry-custom-resources: https://access.redhat.com/jbossnetwork/restricted/listSoftware.html?downloadType=distributions&product=red.hat.integration&version=2020-Q2 From 1f397dc0b5343b5a90c1900fcf2dd0b3f9d9839e Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Fri, 30 Oct 2020 13:19:29 -0400 Subject: [PATCH 10/31] Added docs for configuring default global rules (#963) * Added docs for configuring default global rules * changes based on review feedback * added feedback from review --- .../getting-started/con-registry-rules.adoc | 64 ++++++++++++++++++- 1 file changed, 63 insertions(+), 1 deletion(-) diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-rules.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-rules.adoc index fcb70a1d2a..e894867af3 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-rules.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-rules.adoc @@ -27,9 +27,71 @@ NOTE: If no rules are configured for an artifact, the set of currently configure [id="registry-rules-work"] = How rules work 
-Each rule has a name and optional configuration information. The registry storage maintains the list of rules for each artifact and the list of global rules. Each rule in the list consists of a name and a set of configuration properties, which are specific to the rule implementation. +Each rule has a name and optional configuration information. The registry storage maintains the list of rules for each artifact and the list of global rules. Each rule in the list consists of a name and a set of configuration properties, which are specific to the rule implementation. A rule is provided with the content of the current version of the artifact (if one exists) and the new version of the artifact being added. The rule implementation returns true or false depending on whether the artifact passes the rule. If not, the registry reports the reason why in an HTTP error response. Some rules might not use the previous version of the content. For example, compatibility rules use previous versions, but syntax or semantic validity rules do not. .Additional resources For more details, see {registry-rule-types} and {registry-rule-maturity-matrix}. + +[id="registry-rules-config"] += Configuring rules +You can configure rules individually for each artifact, as well as globally. {registry} applies the rules configured +for the specific artifact, but if no rules are configured at that level it applies the globally configured rules. If +no global rules are configured then no rules are applied. + +== Configuring artifact rules +You can configure artifact rules using the REST API or the web console. See the relevant documentation sections +for more information. 
+ +== Configuring global rules +You can configure global rules in several ways: + +* Use the `/rules` operations in the REST API +* Use the web console +* Set default global rules using {registry} application properties + +.Default global rules +You can configure the {registry} at the application level to enable or disable global rules. This allows +configuring of these global rules at installation time without any need for post-install configuration. You +can use the following application property format: + +`registry.rules.global.` + +The following rule names are currently supported: + +* `compatibility` +* `validity` + +The value of the application property must be a valid configuration option that is specific to the rule being +configured. The following is a table of valid values for each rule: + +[%header,cols=2*] +|=== +|Rule +|Value +|*Validity* +a| `FULL` +| +a| `SYNTAX_ONLY` +| +a| `NONE` +|*Compatibility* +a| `BACKWARD` +| +a| `BACKWARD_TRANSITIVE` +| +a| `FORWARD` +| +a| `FORWARD_TRANSITIVE` +| +a| `FULL` +| +a| `FULL_TRANSITIVE` +| +a| `NONE` +|=== + +NOTE: You can configure these application properties as Java system properties or include them in the Quarkus +`application.properties` file. See more information about configuring {registry} elsewhere in this +documentation and also in https://quarkus.io/guides/config#overriding-properties-at-runtime[Quarkus documentation]. 
From 7928960c5e589c696c5ddbdce4e1549fdacecef3 Mon Sep 17 00:00:00 2001 From: Carles Arnal Date: Fri, 30 Oct 2020 18:20:22 +0100 Subject: [PATCH 11/31] Document artifact states 965 (#967) * Add artifact state docs section * Improve state docs * Address comments * Address comments --- .../assembly-registry-reference.adoc | 7 +++--- .../ref-registry-artifact-states.adoc | 22 +++++++++++++++++++ 2 files changed, 26 insertions(+), 3 deletions(-) create mode 100644 docs/modules/ROOT/partials/getting-started/ref-registry-artifact-states.adoc diff --git a/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc b/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc index 93b80229f9..4be9374d30 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc @@ -6,16 +6,17 @@ include::{mod-loc}shared/all-attributes.adoc[] = {registry} reference //If the assembly covers a task, start the title with a verb in the gerund form, such as Creating or Configuring. -This chapter lists the supported artifact types and content rule types that are stored in {registry}. - +This chapter lists the supported artifact types, states and content rule types that are stored in {registry}. 
* xref:registry-artifact-types[] +* xref:registry-artifact-states[] * xref:registry-rule-types[] * xref:registry-rule-maturity-matrix[] .Additional resources -* For more detailed information on artifact and rule types, see the link:{attachmentsdir}/registry-rest-api.htm[Apicurio Registry REST API documentation] +* For more detailed information on artifact types, states, and rule types, see the link:{attachmentsdir}/registry-rest-api.htm[Apicurio Registry REST API documentation] //INCLUDES include::{mod-loc}getting-started/ref-registry-artifact-types.adoc[leveloffset=+1] +include::{mod-loc}getting-started/ref-registry-artifact-states.adoc[leveloffset=+1] include::{mod-loc}getting-started/ref-registry-rule-types.adoc[leveloffset=+1] include::{mod-loc}getting-started/ref-registry-rule-maturity-matrix.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/partials/getting-started/ref-registry-artifact-states.adoc b/docs/modules/ROOT/partials/getting-started/ref-registry-artifact-states.adoc new file mode 100644 index 0000000000..9f1f486722 --- /dev/null +++ b/docs/modules/ROOT/partials/getting-started/ref-registry-artifact-states.adoc @@ -0,0 +1,22 @@ +// Metadata created by nebel +// ParentAssemblies: assemblies/getting-started/as_registry-reference.adoc + +[id="registry-artifact-states"] + += {registry} artifact states +These are the valid artifact states in {registry}: + +.{registry} artifact states +[%header,cols=2*] +|=== +|State +|Description +|`ENABLED` +|Basic state, all the operations are available. +|`DISABLED` +|The artifact and its metadata is viewable and searchable using the {registry} web console, but its content cannot be fetched by any client. +|`DEPRECATED` +|The artifact is fully usable but a header is added to the REST API response whenever the artifact content is fetched. The {registry} Rest Client will also log a warning whenever it sees deprecated content. 
+|=== + + From 4dae0bd5f7c5ca118bf80379f7427d67a75bf97f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ale=C5=A1=20Justin?= Date: Mon, 2 Nov 2020 14:29:40 +0100 Subject: [PATCH 12/31] Add Datum and Protobuf docs. (#971) * Add Datum and Protobuf docs. * Update con-registry-serdes-types.adoc Co-authored-by: Eric Wittmann --- .../con-registry-serdes-types.adoc | 55 +++++++++++++++++-- 1 file changed, 51 insertions(+), 4 deletions(-) diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc index f5b2d97377..a8fbb61487 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc @@ -134,7 +134,19 @@ that interface: other registries or serde classes) you may want to use 4 bytes when sending the ID. ==== *Avro datum provider* -TBD +Avro provides different Datum writers and readers to write and read data. {registry} supports three different types: + +* Generic +* Specific +* Reflect + +{registry}'s `AvroDatumProvider` is the abstraction on which type is then actually used, +where `DefaultAvroDatumProvider` is used by default. + +There are two configuration options you can set: + +* `apicurio.registry.avro-datum-provider` - provide a fully qualified Java classname of the `AvroDatumProvider` implementation, for example `io.apicurio.registry.utils.serde.avro.ReflectAvroDatumProvider` +* `apicurio.registry.use-specific-avro-reader` - true or false, to use Specific type when using `DefaultAvroDatumProvider` ==== *Avro encoding* @@ -215,10 +227,45 @@ NOTE: Deserializer validation will only work if the serializer passes the global will only happen when validation is enabled in the serializer. == Using Protobuf SerDe with {registry} -TBD + +{registry} provides serializer and deserializer classes for Google Protobuf out of the box, to make using Protobuf as +easy as possible. 
These classes are: + +* `io.apicurio.registry.utils.serde.ProtobufKafkaSerializer` +* `io.apicurio.registry.utils.serde.ProtobufKafkaDeserializer` === Configuring the Protobuf serializer -TBD + +You can configure the Protobuf serializer class in the following ways: + +* {registry} location as a URL +* Artifact ID strategy (see the "Strategies to lookup a schema" section) +* Global ID strategy (see the "Strategies to lookup a schema" section) +* Global ID location +* Global ID handler === Configuring the Protobuf deserializer -TBD + +You must configure the Protobuf deserializer class to match the configuration settings of the serializer. As a +result, you can configure the Protobuf deserializer class in the following ways: + +* {registry} location as a URL +* Global ID handler + +See the serializer documentation for the above configuration options - the property names and values are the same. + +NOTE: The following options are *not* needed when configuring the deserializer: + +* Artifact ID strategy +* Global ID strategy +* Global ID location + +The reason these options are not necessary is that the deserializer class can figure this information out from +the message itself. In the case of the two strategies, they are not needed because the serializer is responsible +for sending the global ID of the schema as part of the message. And the location of that global ID is determined +(by the deserializer) by simply checking for the magic byte at the start of the message payload. If that byte is +found then the global ID is read from the message payload (using the configured handler). If the magic byte is +not found, then the global ID is read from the message headers. + +NOTE: the Protobuf deserializer doesn't deserialize to your exact Protobuf Message implementation, +but rather to a `DynamicMessage` instance (as there is no appropriate API to do otherwise). 
From c92f07b8081ff7783cf48cf9784c4b40cd49ebad Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Tue, 3 Nov 2020 10:28:19 -0500 Subject: [PATCH 13/31] Update antora.yml --- docs/antora.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/antora.yml b/docs/antora.yml index f17826ba24..e3a18ad430 100644 --- a/docs/antora.yml +++ b/docs/antora.yml @@ -1,6 +1,6 @@ name: apicurio-registry title: Apicurio Registry -version: '1.3.1.Final' +version: 'master' start_ROOT: ROOT:index.adoc nav: - modules/ROOT/nav.adoc From a49ac3b08dd42f25f8d7d0bac13fa7bf633f2a34 Mon Sep 17 00:00:00 2001 From: apicurio-ci Date: Tue, 3 Nov 2020 17:40:39 +0000 Subject: [PATCH 14/31] Automated update to Release Version:: 1.3.2.Final --- app/pom.xml | 2 +- client/pom.xml | 2 +- common/pom.xml | 2 +- common/src/main/resources/META-INF/openapi.json | 2 +- distro/connect-converter/pom.xml | 2 +- distro/docker-compose/pom.xml | 2 +- distro/docker/pom.xml | 2 +- distro/openshift-template/pom.xml | 2 +- distro/pom.xml | 2 +- docs/antora.yml | 2 +- docs/pom.xml | 2 +- docs/rest-api/pom.xml | 2 +- pom.xml | 2 +- rest-client/pom.xml | 2 +- search/client/pom.xml | 2 +- search/connector/pom.xml | 2 +- storage/asyncmem/pom.xml | 2 +- storage/infinispan/pom.xml | 2 +- storage/jpa/pom.xml | 2 +- storage/kafka/pom.xml | 2 +- storage/streams/pom.xml | 2 +- tests/pom.xml | 2 +- ui/pom.xml | 2 +- utils/converter/pom.xml | 2 +- utils/kafka/pom.xml | 2 +- utils/maven-plugin/pom.xml | 2 +- utils/serde/pom.xml | 2 +- utils/streams/pom.xml | 2 +- utils/tests/pom.xml | 2 +- utils/tools/pom.xml | 2 +- 30 files changed, 30 insertions(+), 30 deletions(-) diff --git a/app/pom.xml b/app/pom.xml index eed0e1c4d8..30039e1dd8 100644 --- a/app/pom.xml +++ b/app/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/client/pom.xml b/client/pom.xml index 6cd9ca399f..b47903372b 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -7,7 +7,7 @@ io.apicurio 
apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/common/pom.xml b/common/pom.xml index 1492ff22ab..10f2e42639 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/common/src/main/resources/META-INF/openapi.json b/common/src/main/resources/META-INF/openapi.json index 0e069984ae..e8450b918c 100644 --- a/common/src/main/resources/META-INF/openapi.json +++ b/common/src/main/resources/META-INF/openapi.json @@ -2,7 +2,7 @@ "openapi": "3.0.2", "info": { "title": "Apicurio Registry API", - "version": "1.3.1.Final", + "version": "1.3.2.Final", "description": "Apicurio Registry is a datastore for standard event schemas and API designs. Apicurio Registry enables developers to manage and share the structure of their data using a REST interface. For example, client applications can dynamically push or pull the latest updates to or from the registry without needing to redeploy. Apicurio Registry also enables developers to create rules that govern how registry content can evolve over time. For example, this includes rules for content validation and version compatibility.\n\nThe Apicurio Registry REST API enables client applications to manage the artifacts in the registry. This API provides create, read, update, and delete operations for schema and API artifacts, rules, versions, and metadata. \n\nThe supported artifact types include:\n- Apache Avro schema\n- AsyncAPI specification\n- Google protocol buffers (schema and file descriptor)\n- GraphQL schema\n- JSON Schema\n- Kafka Connect schema\n- OpenAPI specification\n- Web Services Description Language\n- XML Schema Definition\n\n\n**Note**: The Apicurio Registry REST API is available from `http://MY-REGISTRY-URL/api`. 
You must prefix all API operation paths with `/api`, for example, `api/ids/{globalId}`.\n", "contact": { "name": "Apicurio", diff --git a/distro/connect-converter/pom.xml b/distro/connect-converter/pom.xml index 748b75f1f2..2d7e49dcd0 100644 --- a/distro/connect-converter/pom.xml +++ b/distro/connect-converter/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry-distro - 1.3.2-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/distro/docker-compose/pom.xml b/distro/docker-compose/pom.xml index ca7a11f603..d3cb671b92 100644 --- a/distro/docker-compose/pom.xml +++ b/distro/docker-compose/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry-distro - 1.3.2-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/distro/docker/pom.xml b/distro/docker/pom.xml index a9dc59f9c2..1dbd0c1325 100644 --- a/distro/docker/pom.xml +++ b/distro/docker/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry-distro - 1.3.2-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/distro/openshift-template/pom.xml b/distro/openshift-template/pom.xml index 85c3ab7cc4..87c9668de0 100644 --- a/distro/openshift-template/pom.xml +++ b/distro/openshift-template/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry-distro - 1.3.2-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/distro/pom.xml b/distro/pom.xml index d8348b328e..7e638df2aa 100644 --- a/distro/pom.xml +++ b/distro/pom.xml @@ -4,7 +4,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../pom.xml apicurio-registry-distro diff --git a/docs/antora.yml b/docs/antora.yml index e3a18ad430..a995c41cd3 100644 --- a/docs/antora.yml +++ b/docs/antora.yml @@ -1,6 +1,6 @@ name: apicurio-registry title: Apicurio Registry -version: 'master' +version: '1.3.2.Final' start_ROOT: ROOT:index.adoc nav: - modules/ROOT/nav.adoc diff --git a/docs/pom.xml b/docs/pom.xml index ed06af065c..6d9d377674 100644 --- a/docs/pom.xml +++ b/docs/pom.xml @@ -8,7 +8,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/docs/rest-api/pom.xml 
b/docs/rest-api/pom.xml index 66720b4357..9210fe96d8 100644 --- a/docs/rest-api/pom.xml +++ b/docs/rest-api/pom.xml @@ -8,7 +8,7 @@ io.apicurio apicurio-registry-docs - 1.3.2-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/pom.xml b/pom.xml index 6d72c25823..b195cc3a97 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final pom apicurio-registry diff --git a/rest-client/pom.xml b/rest-client/pom.xml index 57aaabdfb2..ce5ccf2eff 100644 --- a/rest-client/pom.xml +++ b/rest-client/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../pom.xml 4.0.0 diff --git a/search/client/pom.xml b/search/client/pom.xml index 15b7ad9162..4262ecc6a1 100644 --- a/search/client/pom.xml +++ b/search/client/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/search/connector/pom.xml b/search/connector/pom.xml index cd4a5ecde3..a99e410ddc 100644 --- a/search/connector/pom.xml +++ b/search/connector/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/storage/asyncmem/pom.xml b/storage/asyncmem/pom.xml index 2e8498383a..aeb5fd7e05 100644 --- a/storage/asyncmem/pom.xml +++ b/storage/asyncmem/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/storage/infinispan/pom.xml b/storage/infinispan/pom.xml index 39a868a852..2b7c8613d7 100644 --- a/storage/infinispan/pom.xml +++ b/storage/infinispan/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/storage/jpa/pom.xml b/storage/jpa/pom.xml index e23ac07c8e..5e49008c2c 100644 --- a/storage/jpa/pom.xml +++ b/storage/jpa/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/storage/kafka/pom.xml b/storage/kafka/pom.xml index 2268d2d5c3..4e43948d75 100644 --- 
a/storage/kafka/pom.xml +++ b/storage/kafka/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/storage/streams/pom.xml b/storage/streams/pom.xml index c6b825775c..f1e0a4461c 100644 --- a/storage/streams/pom.xml +++ b/storage/streams/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/tests/pom.xml b/tests/pom.xml index 640b9fca21..7b5e003054 100644 --- a/tests/pom.xml +++ b/tests/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../pom.xml apicurio-registry-tests diff --git a/ui/pom.xml b/ui/pom.xml index 42afdd89be..e2950c0f6d 100644 --- a/ui/pom.xml +++ b/ui/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/utils/converter/pom.xml b/utils/converter/pom.xml index c07298ba70..7c289dd8f4 100644 --- a/utils/converter/pom.xml +++ b/utils/converter/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/utils/kafka/pom.xml b/utils/kafka/pom.xml index 600eccde5f..5cc2c2de8f 100644 --- a/utils/kafka/pom.xml +++ b/utils/kafka/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/utils/maven-plugin/pom.xml b/utils/maven-plugin/pom.xml index 0a1659682a..e64c310978 100644 --- a/utils/maven-plugin/pom.xml +++ b/utils/maven-plugin/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/utils/serde/pom.xml b/utils/serde/pom.xml index 37fa0e31e3..30bd216733 100644 --- a/utils/serde/pom.xml +++ b/utils/serde/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/utils/streams/pom.xml b/utils/streams/pom.xml index eaed459ac7..47bc6f684f 100644 --- a/utils/streams/pom.xml +++ b/utils/streams/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 
1.3.2-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/utils/tests/pom.xml b/utils/tests/pom.xml index 3a42f0c40f..6d5528025d 100644 --- a/utils/tests/pom.xml +++ b/utils/tests/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/utils/tools/pom.xml b/utils/tools/pom.xml index fa41a43402..0d02bfec36 100644 --- a/utils/tools/pom.xml +++ b/utils/tools/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2-SNAPSHOT + 1.3.2.Final ../../pom.xml From 86c22bf0888b7bb05d6073395bd1b8559e3beedd Mon Sep 17 00:00:00 2001 From: apicurio-ci Date: Tue, 3 Nov 2020 18:01:07 +0000 Subject: [PATCH 15/31] Automated update to next Snapshot Version: 1.3.3-SNAPSHOT --- app/pom.xml | 2 +- client/pom.xml | 2 +- common/pom.xml | 2 +- distro/connect-converter/pom.xml | 2 +- distro/docker-compose/pom.xml | 2 +- distro/docker/pom.xml | 2 +- distro/openshift-template/pom.xml | 2 +- distro/pom.xml | 2 +- docs/antora.yml | 2 +- docs/pom.xml | 2 +- docs/rest-api/pom.xml | 2 +- pom.xml | 2 +- rest-client/pom.xml | 2 +- search/client/pom.xml | 2 +- search/connector/pom.xml | 2 +- storage/asyncmem/pom.xml | 2 +- storage/infinispan/pom.xml | 2 +- storage/jpa/pom.xml | 2 +- storage/kafka/pom.xml | 2 +- storage/streams/pom.xml | 2 +- tests/pom.xml | 2 +- ui/pom.xml | 2 +- utils/converter/pom.xml | 2 +- utils/kafka/pom.xml | 2 +- utils/maven-plugin/pom.xml | 2 +- utils/serde/pom.xml | 2 +- utils/streams/pom.xml | 2 +- utils/tests/pom.xml | 2 +- utils/tools/pom.xml | 2 +- 29 files changed, 29 insertions(+), 29 deletions(-) diff --git a/app/pom.xml b/app/pom.xml index 30039e1dd8..75cfe475f1 100644 --- a/app/pom.xml +++ b/app/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/client/pom.xml b/client/pom.xml index b47903372b..1c89abeb20 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git 
a/common/pom.xml b/common/pom.xml index 10f2e42639..6ba6d5cdb3 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/distro/connect-converter/pom.xml b/distro/connect-converter/pom.xml index 2d7e49dcd0..48ae1f2693 100644 --- a/distro/connect-converter/pom.xml +++ b/distro/connect-converter/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry-distro - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/distro/docker-compose/pom.xml b/distro/docker-compose/pom.xml index d3cb671b92..82d4352444 100644 --- a/distro/docker-compose/pom.xml +++ b/distro/docker-compose/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry-distro - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/distro/docker/pom.xml b/distro/docker/pom.xml index 1dbd0c1325..a8e08df117 100644 --- a/distro/docker/pom.xml +++ b/distro/docker/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry-distro - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/distro/openshift-template/pom.xml b/distro/openshift-template/pom.xml index 87c9668de0..b5c9afba9b 100644 --- a/distro/openshift-template/pom.xml +++ b/distro/openshift-template/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry-distro - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/distro/pom.xml b/distro/pom.xml index 7e638df2aa..8000d2521a 100644 --- a/distro/pom.xml +++ b/distro/pom.xml @@ -4,7 +4,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml apicurio-registry-distro diff --git a/docs/antora.yml b/docs/antora.yml index a995c41cd3..e3a18ad430 100644 --- a/docs/antora.yml +++ b/docs/antora.yml @@ -1,6 +1,6 @@ name: apicurio-registry title: Apicurio Registry -version: '1.3.2.Final' +version: 'master' start_ROOT: ROOT:index.adoc nav: - modules/ROOT/nav.adoc diff --git a/docs/pom.xml b/docs/pom.xml index 6d9d377674..f3360f08fe 100644 --- a/docs/pom.xml +++ b/docs/pom.xml @@ -8,7 +8,7 @@ io.apicurio apicurio-registry - 
1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/docs/rest-api/pom.xml b/docs/rest-api/pom.xml index 9210fe96d8..9bb6e266ff 100644 --- a/docs/rest-api/pom.xml +++ b/docs/rest-api/pom.xml @@ -8,7 +8,7 @@ io.apicurio apicurio-registry-docs - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/pom.xml b/pom.xml index b195cc3a97..675138635d 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT pom apicurio-registry diff --git a/rest-client/pom.xml b/rest-client/pom.xml index ce5ccf2eff..c62bb51d46 100644 --- a/rest-client/pom.xml +++ b/rest-client/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml 4.0.0 diff --git a/search/client/pom.xml b/search/client/pom.xml index 4262ecc6a1..18d22ae03d 100644 --- a/search/client/pom.xml +++ b/search/client/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/search/connector/pom.xml b/search/connector/pom.xml index a99e410ddc..005beaf930 100644 --- a/search/connector/pom.xml +++ b/search/connector/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/storage/asyncmem/pom.xml b/storage/asyncmem/pom.xml index aeb5fd7e05..fadddb946e 100644 --- a/storage/asyncmem/pom.xml +++ b/storage/asyncmem/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/storage/infinispan/pom.xml b/storage/infinispan/pom.xml index 2b7c8613d7..103c85c1f7 100644 --- a/storage/infinispan/pom.xml +++ b/storage/infinispan/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/storage/jpa/pom.xml b/storage/jpa/pom.xml index 5e49008c2c..9922795708 100644 --- a/storage/jpa/pom.xml +++ b/storage/jpa/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/storage/kafka/pom.xml 
b/storage/kafka/pom.xml index 4e43948d75..d160f663d0 100644 --- a/storage/kafka/pom.xml +++ b/storage/kafka/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/storage/streams/pom.xml b/storage/streams/pom.xml index f1e0a4461c..cc328304c6 100644 --- a/storage/streams/pom.xml +++ b/storage/streams/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/tests/pom.xml b/tests/pom.xml index 7b5e003054..25bc5fff0f 100644 --- a/tests/pom.xml +++ b/tests/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml apicurio-registry-tests diff --git a/ui/pom.xml b/ui/pom.xml index e2950c0f6d..40e21755b1 100644 --- a/ui/pom.xml +++ b/ui/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/utils/converter/pom.xml b/utils/converter/pom.xml index 7c289dd8f4..d020c316dc 100644 --- a/utils/converter/pom.xml +++ b/utils/converter/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/utils/kafka/pom.xml b/utils/kafka/pom.xml index 5cc2c2de8f..3e99b3ae9a 100644 --- a/utils/kafka/pom.xml +++ b/utils/kafka/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/utils/maven-plugin/pom.xml b/utils/maven-plugin/pom.xml index e64c310978..9081f2ba4f 100644 --- a/utils/maven-plugin/pom.xml +++ b/utils/maven-plugin/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/utils/serde/pom.xml b/utils/serde/pom.xml index 30bd216733..27967bef7f 100644 --- a/utils/serde/pom.xml +++ b/utils/serde/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/utils/streams/pom.xml b/utils/streams/pom.xml index 47bc6f684f..76364ac481 100644 --- a/utils/streams/pom.xml +++ 
b/utils/streams/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/utils/tests/pom.xml b/utils/tests/pom.xml index 6d5528025d..80c9b05534 100644 --- a/utils/tests/pom.xml +++ b/utils/tests/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/utils/tools/pom.xml b/utils/tools/pom.xml index 0d02bfec36..1c1c326257 100644 --- a/utils/tools/pom.xml +++ b/utils/tools/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml From e4d15886fe91a2f0169c03951dba30d81b7717e9 Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Tue, 3 Nov 2020 13:06:17 -0500 Subject: [PATCH 16/31] Update antora.yml --- docs/antora.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/antora.yml b/docs/antora.yml index e3a18ad430..a995c41cd3 100644 --- a/docs/antora.yml +++ b/docs/antora.yml @@ -1,6 +1,6 @@ name: apicurio-registry title: Apicurio Registry -version: 'master' +version: '1.3.2.Final' start_ROOT: ROOT:index.adoc nav: - modules/ROOT/nav.adoc From baf90b6b38294e247d8e50044061e5a40a395d49 Mon Sep 17 00:00:00 2001 From: Stephen McCarthy <29098561+smccarthy-ie@users.noreply.github.com> Date: Wed, 11 Nov 2020 20:38:41 +0000 Subject: [PATCH 17/31] clean up registry user docs and restructure (#1004) --- docs/local-test-playbook.yml | 6 +- .../assets/attachments/registry-rest-api.htm | 2 +- docs/modules/ROOT/nav.adoc | 7 +- .../assembly-configuring-the-registry.adoc | 2 +- ... 
assembly-installing-registry-docker.adoc} | 0 ...ssembly-installing-registry-openshift.adoc | 25 ++++ ...nstalling-registry-storage-openshift.adoc} | 25 +--- .../assembly-intro-to-registry-rules.adoc | 1 - .../assembly-intro-to-the-registry.adoc | 2 - ...embly-managing-registry-artifacts-api.adoc | 9 +- ...bly-managing-registry-artifacts-maven.adoc | 7 +- ...sembly-managing-registry-artifacts-ui.adoc | 2 + .../assembly-registry-reference.adoc | 10 +- .../assembly-using-kafka-client-serdes.adoc | 4 +- .../assembly-using-the-registry-client.adoc | 4 +- docs/modules/ROOT/pages/index.adoc | 21 ++- .../con-kafka-connect-converters.adoc | 2 +- .../con-registry-artifact-metadata.adoc | 67 --------- .../con-registry-artifacts.adoc | 4 +- .../getting-started/con-registry-client.adoc | 22 +-- .../getting-started/con-registry-demo.adoc | 17 ++- .../getting-started/con-registry-distros.adoc | 4 +- .../con-registry-overview.adoc | 2 +- .../con-registry-rest-api.adoc | 2 +- .../getting-started/con-registry-rules.adoc | 41 +++--- .../getting-started/con-registry-serde.adoc | 9 +- .../con-registry-serdes-concepts.adoc | 20 +-- .../con-registry-serdes-strategy.adoc | 34 ++--- .../con-registry-serdes-types.adoc | 137 ++++++++---------- .../getting-started/con-registry-storage.adoc | 7 +- .../con-registry-web-console.adoc | 9 +- .../proc-configuring-registry-ui.adoc | 41 +++--- ...managing-artifacts-using-maven-plugin.adoc | 49 +++---- .../proc-writing-registry-client.adoc | 30 ++-- .../ref-registry-artifact-metadata.adoc | 58 ++++++++ .../getting-started/ref-registry-client.adoc | 34 +++-- .../ref-registry-rule-maturity-matrix.adoc | 13 +- 37 files changed, 360 insertions(+), 369 deletions(-) rename docs/modules/ROOT/pages/getting-started/{assembly-installing-the-registry-docker.adoc => assembly-installing-registry-docker.adoc} (100%) create mode 100644 docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc rename 
docs/modules/ROOT/pages/getting-started/{assembly-installing-the-registry-openshift.adoc => assembly-installing-registry-storage-openshift.adoc} (64%) delete mode 100644 docs/modules/ROOT/partials/getting-started/con-registry-artifact-metadata.adoc create mode 100644 docs/modules/ROOT/partials/getting-started/ref-registry-artifact-metadata.adoc diff --git a/docs/local-test-playbook.yml b/docs/local-test-playbook.yml index 78430ef69b..00a872b36d 100644 --- a/docs/local-test-playbook.yml +++ b/docs/local-test-playbook.yml @@ -27,9 +27,9 @@ asciidoc: mod-loc: partial$ registry-overview: link:assembly-intro-to-the-registry.html[Introduction to Apicurio Registry] registry-rules: link:assembly-intro-to-registry-rules.html[Introduction to Apicurio Registry rules] - registry-artifact-types: link:assembly-registry-reference.html[Apicurio Registry reference] - registry-rule-types: link:assembly-registry-reference.html[Apicurio Registry reference] - registry-rule-maturity-matrix: link:assembly-registry-reference.html[Apicurio Registry reference] + registry-reference: link:assembly-registry-reference.html[Apicurio Registry artifact reference] managing-registry-artifacts-ui: link:assembly-managing-registry-artifacts-ui.html[Managing Apicurio Registry content using the web console] + managing-registry-artifacts-api: link:assembly-managing-registry-artifacts-api.html[Managing Apicurio Registry content using the REST API] installing-the-registry-docker: link:assembly-installing-the-registry-docker.html[Installing Apicurio Registry using Docker] installing-the-registry-openshift: link:assembly-installing-the-registry-openshift.html[Installing Apicurio Registry on OpenShift] + kafka-client-serdes: link:assembly-using-kafka-client-serdes.html[Validating schemas using Kafka client serializers/deserializers] diff --git a/docs/modules/ROOT/assets/attachments/registry-rest-api.htm b/docs/modules/ROOT/assets/attachments/registry-rest-api.htm index b2a41acc77..08414ab3b7 100644 --- 
a/docs/modules/ROOT/assets/attachments/registry-rest-api.htm +++ b/docs/modules/ROOT/assets/attachments/registry-rest-api.htm @@ -16,7 +16,7 @@ - + diff --git a/docs/modules/ROOT/nav.adoc b/docs/modules/ROOT/nav.adoc index 575808a7eb..e4fe36299a 100644 --- a/docs/modules/ROOT/nav.adoc +++ b/docs/modules/ROOT/nav.adoc @@ -1,13 +1,14 @@ * xref:getting-started/assembly-intro-to-the-registry.adoc[] * xref:getting-started/assembly-intro-to-registry-rules.adoc[] ifdef:getting-started/:apicurio-registry[] -* xref:getting-started/assembly-installing-the-registry-docker.adoc[] +* xref:getting-started/assembly-installing-registry-docker.adoc[] endif:getting-started/:[] -* xref:getting-started/assembly-installing-the-registry-openshift.adoc[] +* xref:getting-started/assembly-installing-registry-openshift.adoc[] +* xref:getting-started/assembly-installing-registry-storage-openshift.adoc[] * xref:getting-started/assembly-configuring-the-registry.adoc[] * xref:getting-started/assembly-managing-registry-artifacts-ui.adoc[] * xref:getting-started/assembly-managing-registry-artifacts-api.adoc[] * xref:getting-started/assembly-managing-registry-artifacts-maven.adoc[] -* xref:getting-started/assembly-using-kafka-client-serdes.adoc[] * xref:getting-started/assembly-using-the-registry-client.adoc[] +* xref:getting-started/assembly-using-kafka-client-serdes.adoc[] * xref:getting-started/assembly-registry-reference.adoc[] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-configuring-the-registry.adoc b/docs/modules/ROOT/pages/getting-started/assembly-configuring-the-registry.adoc index 73a504f8ee..524b8cd34b 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-configuring-the-registry.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-configuring-the-registry.adoc @@ -2,7 +2,7 @@ include::{mod-loc}shared/all-attributes.adoc[] [id="configuring-the-registry"] -= Configuring {registry} on OpenShift += Managing {registry} deployment on OpenShift This chapter 
explains how to configure optional settings for {registry} health checks on OpenShift: diff --git a/docs/modules/ROOT/pages/getting-started/assembly-installing-the-registry-docker.adoc b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-docker.adoc similarity index 100% rename from docs/modules/ROOT/pages/getting-started/assembly-installing-the-registry-docker.adoc rename to docs/modules/ROOT/pages/getting-started/assembly-installing-registry-docker.adoc diff --git a/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc new file mode 100644 index 0000000000..a6b901af1f --- /dev/null +++ b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc @@ -0,0 +1,25 @@ +// Metadata created by nebel + +include::{mod-loc}shared/all-attributes.adoc[] + +[id="installing-registry-ocp"] += Installing {registry} on OpenShift + +This chapter explains how to install {registry}: + +* xref:installing-registry-operatorhub[] +//* xref:installing-registry-kafka-streams-template-storage[] + +.Prerequisites +* {registry-overview} + +NOTE: You can install more than one instance of {registry} depending on your environment. The number of instances depends on the number and type of artifacts stored in {registry}, and on your chosen storage option, for example, Kafka Streams, database, Infinispan cluster configuration. + +ifdef::apicurio-registry[] +.Additional resources +* For details on building from source, see https://github.com/Apicurio/apicurio-registry. 
+endif::[] + +//INCLUDES +//include::{mod-loc}getting-started/proc_installing-registry-kafka-streams-template-storage.adoc[leveloffset=+1] +include::{mod-loc}getting-started/proc-installing-registry-operatorhub.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-installing-the-registry-openshift.adoc b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-storage-openshift.adoc similarity index 64% rename from docs/modules/ROOT/pages/getting-started/assembly-installing-the-registry-openshift.adoc rename to docs/modules/ROOT/pages/getting-started/assembly-installing-registry-storage-openshift.adoc index 318961b163..e774fca8f1 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-installing-the-registry-openshift.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-storage-openshift.adoc @@ -1,19 +1,10 @@ // Metadata created by nebel - include::{mod-loc}shared/all-attributes.adoc[] -[id="installing-the-registry"] -= Installing {registry} on OpenShift - -This chapter explains how to first install {registry} and then how to set up your chosen registry storage option: {kafka-streams}, embedded Infinispan, or PostgreSQL database. - -.Prerequisites -* {registry-overview} +[id="installing-registry-storage"] += Installing {registry} storage on OpenShift -.{registry} installation -* xref:installing-registry-operatorhub[] -//* xref:installing-registry-kafka-streams-template-storage[] -* xref:configuring-registry-ui[] +This chapter explains how to install and configure your chosen registry storage option: {kafka-streams}, embedded Infinispan, or PostgreSQL database. .{kafka-streams} storage * xref:installing-kafka-streams-operatorhub[] @@ -44,17 +35,11 @@ These features provide early access to upcoming product features, enabling custo ==== endif::[] -NOTE: You can install more than one instance of {registry} depending on your environment. 
The number of instances depends on your storage option, for example, your Kafka, Infinispan, or database cluster configuration, and on the number and type of artifacts stored in {registry}. - -ifdef::apicurio-registry[] -.Additional resources -* For details on building from source, see https://github.com/Apicurio/apicurio-registry. -endif::[] +.Prerequisites +* {installing-the-registry-openshift} //INCLUDES //include::{mod-loc}getting-started/proc_installing-registry-kafka-streams-template-storage.adoc[leveloffset=+1] -include::{mod-loc}getting-started/proc-installing-registry-operatorhub.adoc[leveloffset=+1] -include::{mod-loc}getting-started/proc-configuring-registry-ui.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-installing-kafka-streams-operatorhub.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-setting-up-kafka-streams-storage.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-setting-up-infinispan-storage.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc index 9f3ca239d1..09f505ad5f 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc @@ -12,6 +12,5 @@ This chapter introduces the optional rules used to govern registry content and p * xref:registry-rules-apply[] * xref:registry-rules-work[] - //INCLUDES include::{mod-loc}getting-started/con-registry-rules.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc index bc73274984..2b0a65a0e7 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc @@ -11,7 +11,6 @@ This chapter 
introduces {registry} concepts and features and provides details on * xref:registry-overview[] * xref:registry-artifacts[] * xref:registry-web-console[] -* xref:registry-rest-api[] * xref:registry-storage[] * xref:client-serde[] * xref:kafka-connect[] @@ -22,7 +21,6 @@ This chapter introduces {registry} concepts and features and provides details on include::{mod-loc}getting-started/con-registry-overview.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-registry-artifacts.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-registry-web-console.adoc[leveloffset=+1] -include::{mod-loc}getting-started/con-registry-rest-api.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-registry-storage.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-registry-serde.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-kafka-connect-converters.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc index bc26b6b59d..cba301c3bb 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc @@ -6,11 +6,14 @@ include::{mod-loc}shared/all-attributes.adoc[] = Managing {registry} content using the REST API //If the assembly covers a task, start the title with a verb in the gerund form, such as Creating or Configuring. -This chapter explains how to manage artifacts stored in the registry using the Registry REST API. 
This includes using Registry REST API commands, a Maven plug-in, or a Java client application: +This chapter describes the Registry REST API and shows how to use it manage artifacts stored in the registry: +* xref:registry-rest-api[] * xref:managing-artifacts-using-rest-api[] -* xref:managing-artifacts-using-client-code[] + +.Additional resources +* link:{attachmentsdir}/registry-rest-api.htm[Apicurio Registry REST API documentation] //INCLUDES +include::{mod-loc}getting-started/con-registry-rest-api.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-managing-artifacts-using-rest-api.adoc[leveloffset=+1] -include::{mod-loc}getting-started/proc-managing-artifacts-using-client-code.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc index 7a7425f40c..b63a8461ec 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc @@ -5,9 +5,14 @@ include::{mod-loc}shared/all-attributes.adoc[] [id="managing-registry-artifacts-maven"] = Managing {registry} content using the Maven plug-in -This chapter explains how to manage artifacts stored in the registry using the {registry} Maven plug-in. 
+This chapter explains how to manage artifacts stored in the registry using the {registry} Maven plug-in: * xref:managing-artifacts-using-maven-plugin[] +.Prerequisites +* See {registry-overview} +* {registry} must be installed and running in your environment +* Maven must be installed and configured in your environment + //INCLUDES include::{mod-loc}getting-started/proc-managing-artifacts-using-maven-plugin.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-ui.adoc b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-ui.adoc index 31363e19a4..33b6ca111a 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-ui.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-ui.adoc @@ -8,11 +8,13 @@ include::{mod-loc}shared/all-attributes.adoc[] This chapter explains how to manage artifacts stored in the registry using the {registry} web console. 
This includes uploading and browsing registry content, and configuring optional rules: +* xref:configuring-registry-ui[] * xref:adding-artifacts-using-console[] * xref:browsing-artifacts-using-console[] * xref:configuring-rules-using-console[] //INCLUDES +include::{mod-loc}getting-started/proc-configuring-registry-ui.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-adding-artifacts-using-console.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-browsing-artifacts-using-console.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-configuring-rules-using-console.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc b/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc index 4be9374d30..8319eda9f2 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc @@ -3,20 +3,24 @@ include::{mod-loc}shared/all-attributes.adoc[] [id="artifact-and-rule-types"] -= {registry} reference += {registry} artifact reference //If the assembly covers a task, start the title with a verb in the gerund form, such as Creating or Configuring. -This chapter lists the supported artifact types, states and content rule types that are stored in {registry}. +This chapter provides details on the supported artifact types, states, metadata, and content rules that are stored in {registry}. 
+ * xref:registry-artifact-types[] * xref:registry-artifact-states[] +* xref:registry-artifact-metadata[] * xref:registry-rule-types[] * xref:registry-rule-maturity-matrix[] + .Additional resources -* For more detailed information on artifact types, states, and rule types, see the link:{attachmentsdir}/registry-rest-api.htm[Apicurio Registry REST API documentation] +* For more detailed information, see the link:{attachmentsdir}/registry-rest-api.htm[Apicurio Registry REST API documentation] //INCLUDES include::{mod-loc}getting-started/ref-registry-artifact-types.adoc[leveloffset=+1] include::{mod-loc}getting-started/ref-registry-artifact-states.adoc[leveloffset=+1] +include::{mod-loc}getting-started/ref-registry-artifact-metadata.adoc[leveloffset=+1] include::{mod-loc}getting-started/ref-registry-rule-types.adoc[leveloffset=+1] include::{mod-loc}getting-started/ref-registry-rule-maturity-matrix.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc b/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc index f4ee40170f..361721ae07 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc @@ -3,12 +3,12 @@ include::{mod-loc}shared/all-attributes.adoc[] [id="using-kafka-client-serdes"] -= Using the Kafka client serializers/deserializers += Validating schemas using Kafka client serializers/deserializers //If the assembly covers a task, start the title with a verb in the gerund form, such as Creating or Configuring. {registry} provides Kafka client serializers/deserializers for producer and consumer applications. Kafka producer applications use serializers to encode messages that conform to a specific event schema. Kafka consumer applications use deserializers to validate that the messages have been serialized using the correct schema, based on a specific schema ID. 
This ensures consistent schema use and helps to prevent data errors at runtime. -This chapter provides instructions on how to use the Kafka client serializer and deserializer for Apache Avro in your Kafka producer and consumer client applications: +This chapter provides instructions on how to use the Kafka client serializers and deserializers for Apache Avro, JSON Schema, and Google Protobuf in your Kafka producer and consumer client applications: * xref:registry-serdes-concepts-serde-{context}[] * xref:registry-serdes-types-serde-{context}[] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-using-the-registry-client.adoc b/docs/modules/ROOT/pages/getting-started/assembly-using-the-registry-client.adoc index eff4cc9c82..69af293c4d 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-using-the-registry-client.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-using-the-registry-client.adoc @@ -2,13 +2,13 @@ include::{mod-loc}shared/all-attributes.adoc[] [id="using-the-registry-client"] -= Using the {registry} Java client += Managing {registry} content using the Java client This chapter explains how to use the {registry} Java client: * xref:registry-client-intro[] * xref:writing-registry-client[] -* xref:registry-client-types[] +* xref:registry-client-config[] //INCLUDES include::{mod-loc}getting-started/con-registry-client.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/pages/index.adoc b/docs/modules/ROOT/pages/index.adoc index a84e93bf7a..95c6d63e7d 100644 --- a/docs/modules/ROOT/pages/index.adoc +++ b/docs/modules/ROOT/pages/index.adoc @@ -1,6 +1,21 @@ include::partial$shared/attributes.adoc[] -= Welcome += {registry} documentation -Welcome to the {registry} version {registry-version} documentation. -Navigate using the left hand menu. \ No newline at end of file +Welcome to the {registry} version {registry-version} documentation. Navigate using the left menu. 
+ +{registry} stores and retrieves API designs and event schemas, and gives you control of their evolution. + + +== About this documentation +This introduces {registry}, explains how to install with your chosen storage option, and how to manage event schemas and API designs using the {registry} web console, REST API, Maven plug-in, or Java client. + +This also explains how to enforce schemas using Kafka client serializers and deserializers in your consumer and producer applications. It also describes {registry} content types and rule types, and OpenShift environment variables for health checks. + +== Getting help with {registry} + +See the link:https://github.com/Apicurio/apicurio-registry[{registry}] and https://github.com/Apicurio/apicurio-registry-operator[{registry} Operator] projects on GitHub. +Any contributions, suggestions, and issue reports are welcome. + +ifdef::apicurio-registry[] +link:https://github.com/Apicurio/apicurio-registry/issues/new[Create an issue] on GitHub if you find any problems. diff --git a/docs/modules/ROOT/partials/getting-started/con-kafka-connect-converters.adoc b/docs/modules/ROOT/partials/getting-started/con-kafka-connect-converters.adoc index 9f8ec22bb6..3b64dcc06f 100644 --- a/docs/modules/ROOT/partials/getting-started/con-kafka-connect-converters.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-kafka-connect-converters.adoc @@ -2,7 +2,7 @@ [id="kafka-connect"] -= Kafka Connect converters += Stream data to external sytems with Kafka Connect converters You can use {registry} with Apache Kafka Connect to stream data between Kafka and external systems. Using Kafka Connect, you can define connectors for different systems to move large volumes of data into and out of Kafka-based systems. 
.{registry} and Kafka Connect architecture diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-artifact-metadata.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-artifact-metadata.adoc deleted file mode 100644 index cd4a9596d3..0000000000 --- a/docs/modules/ROOT/partials/getting-started/con-registry-artifact-metadata.adoc +++ /dev/null @@ -1,67 +0,0 @@ -// Metadata created by nebel - -[id="registry-artifact-metadata"] -= {registry} artifact metadata - -Whenever an item is added to {registry}, a set of metadata properties is stored along with the item content. This -metadata consists of a set of generated, read-only properties along with some properties that can be set by the user. - -.{registry} metadata properties -[%header,cols=3*] -|=== -|Property -|Type -|Editable -|`id` -a| string -a| false -|`type` -a| ArtifactType -a| false -|`state` -a| ArtifactState -a| true -|`version` -a| integer -a| false -|`createdBy` -a| string -a| false -|`createdOn` -a| date -a| false -|`modifiedBy` -a| string -a| false -|`modifiedOn` -a| date -a| false -|`name` -a| string -a| true -|`description` -a| string -a| true -|`labels` -a| array of string -a| true -|`properties` -a| map -a| true -|=== - -== Updating artifact metadata - -The set of editable properties can be updated via the REST API, using the metadata endpoint(s). Please see the -`/artifacts/{artifactId}/meta` section of the REST API reference for details. - -== Updating artifact state - -It is important to note that the `state` property is editable only by using the state transition API, which allows -users to, for example, mark an artifact as `deprecated` or `disabled`. Please see the `/artifacts/{artifactId}/meta` -section of the REST API reference for details. - -== Custom key-value properties - -{registry} allows users to set arbitrary key-value properties on any artifact. The `properties` property above is -the mechanism that allows this. 
As a result, any custom metadata properties can be stored with an artifact. diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc index b08d007fb0..b03d211588 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc @@ -1,7 +1,7 @@ // Metadata created by nebel [id="registry-artifacts"] -= {registry} artifacts += Store schema and API artifacts in {registry} The items stored in {registry}, such as event schemas and API specifications, are known as registry _artifacts_. The following shows an example of an Apache Avro schema artifact in JSON format for a simple share price application: @@ -26,4 +26,4 @@ The items stored in {registry}, such as event schemas and API specifications, ar When a schema or API contract is added as an artifact in the registry, client applications can then use that schema or API contract to validate that client messages conform to the correct data structure at runtime. -{registry} supports a wide range of message payload formats for standard event schemas and API specifications. For example, supported formats include Apache Avro, Google protocol buffers, GraphQL, AsyncAPI, OpenAPI, and others. For more details, see {registry-artifact-types}. +{registry} supports a wide range of message payload formats for standard event schemas and API specifications. For example, supported formats include Apache Avro, Google protocol buffers, GraphQL, AsyncAPI, OpenAPI, and others. For more details, see {registry-reference}. 
diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-client.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-client.adoc index 538e7275e3..c17d3adad0 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-client.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-client.adoc @@ -1,24 +1,10 @@ // Metadata created by nebel [id="registry-client-intro"] -= {registry} Java client overview += {registry} Java client -You can manage artifacts stored in {registry} using a Java client application. You can create, read, update, or delete artifacts stored -in the registry using the {registry} Java client classes. +You can manage artifacts stored in {registry} using a Java client application. You can create, read, update, or delete artifacts stored in the registry using the {registry} Java client classes. -You can access the {registry} Java client by adding the proper dependency to your project, see xref:writing-registry-client[]. +You can access the {registry} Java client by adding the correct dependency to your project, see xref:writing-registry-client[]. -The {registry} client is autocloseable and is implemented using Retrofit and OkHttp as base libraries. This gives the user the ability to customize its -usage by, for example, adding custom headers or enabling TLS auth support. - -== Enabling TLS support in the client - -You can configure TLS authentication using the following properties: - -* apicurio.registry.request.ssl.truststore.location -* apicurio.registry.request.ssl.truststore.password -* apicurio.registry.request.ssl.truststore.type -* apicurio.registry.request.ssl.keystore.location -* apicurio.registry.request.ssl.keystore.password -* apicurio.registry.request.ssl.keystore.type -* apicurio.registry.request.ssl.key.password +The {registry} client is auto-closeable and is implemented using Retrofit and OkHttp as base libraries. 
This gives you the ability to customize its use, for example, by adding custom headers or enabling Transport Layer Security (TLS) authentication. For more details, see xref:registry-client-config[]. diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-demo.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-demo.adoc index ad29a33176..355d9e17b6 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-demo.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-demo.adoc @@ -1,11 +1,20 @@ // Metadata created by nebel [id="registry-demo"] -= Registry demonstration examples -{registry} provides an open source demonstration example of Apache Avro serialization/deserialization with storage in Apache Kafka Streams. This example shows how the serializer/deserializer obtains the Avro schema from the registry at runtime and uses it to serialize and deserialize Kafka messages. For more details, see link:https://github.com/Apicurio/apicurio-registry-demo[]. += {registry} demonstration examples +{registry} provides an open source demonstration of Apache Avro serialization/deserialization with storage in Apache Kafka Streams. This example shows how the serializer/deserializer obtains the Avro schema from the registry at runtime and uses it to serialize and deserialize Kafka messages. For more details, see link:https://github.com/Apicurio/apicurio-registry-demo[]. -This demonstration also provides simple examples of both -link:https://github.com/Apicurio/apicurio-registry-demo/tree/master/src/main/java/io/apicurio/registry/demo/simple[Avro and JSON Schema serialization/deserialization with storage in Apache Kafka]. 
+{registry} also provides the following example applications: + +* Simple Avro example +* Simple JSON Schema example +* Confluent Serdes integration +* Avro bean example +* Custom ID strategy example +* Simple Avro Maven example +* REST client example + +For more details, see link:https://github.com/Apicurio/apicurio-registry-examples[] ifdef::rh-service-registry[] For another open source demonstration example with detailed instructions on Avro serialization/deserialization with storage in Apache Kafka, see the Red Hat Developer article on link:https://developers.redhat.com/blog/2019/12/16/getting-started-with-red-hat-integration-service-registry/[Getting Started with Red Hat Integration Service Registry]. diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-distros.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-distros.adoc index 11230ac5c1..c2dbfac78c 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-distros.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-distros.adoc @@ -1,9 +1,7 @@ // Metadata created by nebel [id="registry-distros"] -= Available distributions - -{registry} includes the following distributions: += {registry} available distributions ifdef::apicurio-registry[] diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-overview.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-overview.adoc index d2374e1643..cf92deafbb 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-overview.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-overview.adoc @@ -21,7 +21,7 @@ ifdef::rh-service-registry[] endif::[] [discrete] -== {registry} main features +== {registry} capabilities * Support for multiple payload formats for standard event schemas and API specifications diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-rest-api.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-rest-api.adoc index 
52d88cab0e..138bf59ceb 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-rest-api.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-rest-api.adoc @@ -1,7 +1,7 @@ // Metadata created by nebel [id="registry-rest-api"] -= Registry REST API += Registry REST API overview Using the Registry REST API, client applications can manage the artifacts in {registry}. This API provides create, read, update, and delete operations for: Artifacts:: diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-rules.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-rules.adoc index e894867af3..3d641f0e75 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-rules.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-rules.adoc @@ -2,7 +2,7 @@ [id="registry-rules"] -= Rules for registry content += Govern registry content using rules To govern the evolution of registry content, you can configure optional rules for artifact content added to the registry. All configured global rules or artifact rules must pass before a new artifact version can be uploaded to the registry. Configured artifact rules override any configured global rules. The goal of these rules is to prevent invalid content from being added to the registry. For example, content can be invalid for the following reasons: @@ -32,31 +32,32 @@ Each rule has a name and optional configuration information. The registry storag A rule is provided with the content of the current version of the artifact (if one exists) and the new version of the artifact being added. The rule implementation returns true or false depending on whether the artifact passes the rule. If not, the registry reports the reason why in an HTTP error response. Some rules might not use the previous version of the content. For example, compatibility rules use previous versions, but syntax or semantic validity rules do not. 
.Additional resources -For more details, see {registry-rule-types} and {registry-rule-maturity-matrix}. +For more details, see {registry-reference}. [id="registry-rules-config"] -= Configuring rules -You can configure rules individually for each artifact, as well as globally. {registry} applies the rules configured -for the specific artifact, but if no rules are configured at that level it applies the globally configured rules. If -no global rules are configured then no rules are applied. += Content rule configuration +You can configure rules individually for each artifact, as well as globally. {registry} applies the rules configured for the specific artifact. But if no rules are configured at that level, {registry} applies the globally configured rules. If no global rules are configured, no rules are applied. -== Configuring artifact rules -You can configure artifact rules using the REST API or the web console. See the relevant documentation sections -for more information. +[discrete] +== Configure artifact rules +You can configure artifact rules using the {registry} web console or REST API. For details, see the following: -== Configuring global rules +* {managing-registry-artifacts-ui} +* link:{attachmentsdir}/registry-rest-api.htm[Apicurio Registry REST API documentation] + +[discrete] +== Configure global rules You can configure global rules in several ways: * Use the `/rules` operations in the REST API -* Use the web console +* Use the {registry} web console * Set default global rules using {registry} application properties -.Default global rules -You can configure the {registry} at the application level to enable or disable global rules. This allows -configuring of these global rules at installation time without any need for post-install configuration. You -can use the following application property format: - -`registry.rules.global.` +.Configure default global rules +You can configure {registry} at the application level to enable or disable global rules. 
You can configure default global rules at installation time without post-install configuration using the following application property format: +---- +registry.rules.global. +---- The following rule names are currently supported: @@ -64,8 +65,9 @@ The following rule names are currently supported: * `validity` The value of the application property must be a valid configuration option that is specific to the rule being -configured. The following is a table of valid values for each rule: +configured. The following table shows the valid values for each rule: +.{registry} content rules [%header,cols=2*] |=== |Rule @@ -93,5 +95,4 @@ a| `NONE` |=== NOTE: You can configure these application properties as Java system properties or include them in the Quarkus -`application.properties` file. See more information about configuring {registry} elsewhere in this -documentation and also in https://quarkus.io/guides/config#overriding-properties-at-runtime[Quarkus documentation]. +`application.properties` file. For more details, see the https://quarkus.io/guides/config#overriding-properties-at-runtime[Quarkus documentation]. diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serde.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serde.adoc index d5d187e62b..d4e89e2156 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serde.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serde.adoc @@ -2,7 +2,7 @@ [id="client-serde"] -= Kafka client serializers/deserializers += Validate schemas with Kafka client serializers/deserializers Kafka producer applications can use serializers to encode messages that conform to a specific event schema. Kafka consumer applications can then use deserializers to validate that messages have been serialized using the correct schema, based on a specific schema ID. 
.{registry} and Kafka client serializer/deserializer architecture @@ -16,10 +16,5 @@ image::images/getting-started/registry-serdes-architecture.png[Registry Serdes a The {registry} Maven repository and source code distributions include the Kafka serializer/deserializer implementations for these message types, which Kafka client developers can use to integrate with the registry. These implementations include custom `io.apicurio.registry.utils.serde` Java classes for each supported message type, which client applications can use to pull schemas from the registry at runtime for validation. -ifdef::rh-service-registry[] - .Additional resources -* For instructions on how to use the {registry} Kafka client serializer/deserializer for Apache Avro in AMQ Streams producer and consumer applications, see -link:https://access.redhat.com/documentation/en-us/red_hat_amq/{amq-version}/html/using_amq_streams_on_openshift/service-registry-str[Using AMQ Streams on Openshift]. - -endif::[] +* {kafka-client-serdes} diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc index 8d3f010541..0a0ae09bbc 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc @@ -15,9 +15,7 @@ Schemas can evolve, so you can define rules in {registry}, for example, to ensur * Protobuf * JSON Schema -These schema technologies can be used by client applications through Kafka client serializer/deserializer (SerDe) -services provided by {registry}. The maturity and usage of the SerDe classes provided by {registry} may vary. See -the type-specific sections below for more details about each. +These schema technologies can be used by client applications through Kafka client serializer/deserializer (SerDe) services provided by {registry}. 
The maturity and usage of the SerDe classes provided by {registry} may vary. See the type-specific sections below for more details about each. = Producer schema configuration @@ -30,12 +28,10 @@ To enable a producer to use {registry} for serialization: ** URL of {registry} ** {registry} serializer to use with the messages -** _Strategy_ to map the Kafka message to an artifact ID in {registry} -** _Strategy_ to look up (or register) the schema used for serialization in {registry} +** Strategy to map the Kafka message to an artifact ID in {registry} +** Strategy to look up or register the schema used for serialization in {registry} -After registering your schema, when you start Kafka and {registry}, you can access the schema to format messages -sent to the Kafka broker topic by the producer. Alternatively (depending on configuration), the producer can -automatically register the schema on first use. +After registering your schema, when you start Kafka and {registry}, you can access the schema to format messages sent to the Kafka broker topic by the producer. Alternatively (depending on configuration), the producer can automatically register the schema on first use. If a schema already exists, you can create a new version using the REST API based on compatibility rules defined in {registry}. Versions are used for compatibility checking as a schema evolves. An artifact ID and schema version represents a unique tuple that identifies a schema. @@ -50,15 +46,11 @@ To enable a consumer to use {registry} for deserialization: ** {registry} deserializer to use with the messages ** Input data stream for deserialization -The schema is then retrieved by the deserializer using a global ID written into the message being consumed. The schema -global ID can be located in the message headers or in the message payload itself, depending on the configuration of -the producer application. 
+The schema is then retrieved by the deserializer using a global ID written into the message being consumed. The schema global ID can be located in the message headers or in the message payload itself, depending on the configuration of the producer application. -When locating the global ID in the message payload, the format of the data begins with a magic byte (as a signal to -consumers) followed by the global ID and then the message data as normal. +When locating the global ID in the message payload, the format of the data begins with a magic byte (as a signal to consumers) followed by the global ID and then the message data as normal. For example: - [source,shell,subs="+quotes,attributes"] ---- # ... diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc index 0bf39f3f52..c938836e86 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc @@ -11,30 +11,24 @@ For a given topic and message, you can use implementations of the following Java * `ArtifactIdStrategy` to return an artifact ID * `GlobalIdStrategy` to return a global ID -.Artifact ID strategy - -The artifact ID strategy provides a way to map the Kafka topic and message information to the ID of an artifact in -{registry}. The common convention for the mapping is to combine the Kafka topic name with either `key` or `value` -(depending on whether the serializer is being used for the Kafka message key or value). However, alternative -conventions can be used for the mapping, either by using an alternative strategy provided by {registry} or by -creating a custom Java class that implements `io.apicurio.registry.utils.serde.strategy.ArtifactIdStrategy`. 
- -.Global ID strategy - -The global ID strategy is responsible for locating and identifying the specific *version* of the schema registered -under the artifact ID provided by the artifact ID strategy. Every version of every artifact has a single globally -unique identifier that can be used to retrieve the content of that artifact. As described in a previous section, -that global ID is what gets included in every Kafka message so that a deserializer can properly fetch the schema -from {registry}. The global ID strategy can either lookup an existing artifact version, or it can register one if -not found, depending on which strategy is used. Additionally, you can provide your own strategy by creating a -custom Java class that implements `io.apicurio.registry.utils.serde.strategy.GlobalIdStrategy`. +[discrete] +== Artifact ID strategy -The classes for each strategy are organized in the `io.apicurio.registry.utils.serde.strategy` package. +The artifact ID strategy provides a way to map the Kafka topic and message information to the ID of an artifact in {registry}. The common convention for the mapping is to combine the Kafka topic name with either `key` or `value`, depending on whether the serializer is being used for the Kafka message key or value). -The default artifact ID strategy is `TopicIdStrategy`, which looks for {registry} artifacts with the same name as the Kafka topic receiving messages. +However, you can use alternative conventions for the mapping, either by using an alternative strategy provided by {registry} or by creating a custom Java class that implements `io.apicurio.registry.utils.serde.strategy.ArtifactIdStrategy`. -.Example +[discrete] +== Global ID strategy + +The global ID strategy locates and identifies the specific version of the schema registered under the artifact ID provided by the artifact ID strategy. Every version of every artifact has a single globally unique identifier that can be used to retrieve the content of that artifact. 
That global ID is what gets included in every Kafka message so that a deserializer can properly fetch the schema from {registry}. +The global ID strategy can either lookup an existing artifact version, or it can register one if not found, depending on which strategy is used. Additionally, you can provide your own strategy by creating a +custom Java class that implements `io.apicurio.registry.utils.serde.strategy.GlobalIdStrategy`. + +The classes for each strategy are organized in the `io.apicurio.registry.utils.serde.strategy` package. The default artifact ID strategy is `TopicIdStrategy`, which looks for {registry} artifacts with the same name as the Kafka topic receiving messages. + +.Example [source,java,subs="+quotes,attributes"] ---- public String artifactId(String topic, boolean isKey, T schema) { diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc index a8fbb61487..ca49069878 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc @@ -4,25 +4,20 @@ [id='registry-serdes-types-serde-{context}'] = Configuring different SerDe types -When using a schema technology in your Kafka applications, you must choose which specific schema type to use. Common -options include: +When using a schema technology in your Kafka applications, you must choose which specific schema type to use. Common options include: * Apache Avro * JSON Schema * Google Protobuf -Which schema technology you choose is dependent on use case and preference. Of course Kafka allows you to implement -your own custom serializer and deserializer classes, so you are always free to write your own classes, including -leveraging {registry} functionality by utilizing the REST Client (see the "Using the {registry} Java client" section). 
+Which schema technology you choose is dependent on use case and preference. Of course, Kafka allows you to implement your own custom serializer and deserializer classes, including classes that leverage {registry} functionality using the {registry} REST Java client.
-For your convenience, {registry} provides out of the box SerDe classes for all three of the above schema technologies.
-This section of the documentation explains how to configure Kafka applications to use each type.
+For your convenience, {registry} provides out-of-the-box SerDe classes for all three schema technologies. This section explains how to configure Kafka applications to use each type.
These classes are: * `io.apicurio.registry.utils.serde.AvroKafkaSerializer` * `io.apicurio.registry.utils.serde.AvroKafkaDeserializer` @@ -101,65 +96,53 @@ easy as possible. These classes are: You can configure the Avro serializer class in the following ways: * {registry} location as a URL -* Artifact ID strategy (see the "Strategies to lookup a schema" section) -* Global ID strategy (see the "Strategies to lookup a schema" section) +* Artifact ID strategy +* Global ID strategy * Global ID location * Global ID handler * Avro datum provider * Avro encoding -==== *Global ID location* - -The serializer is responsible for passing the unique global ID of the schema as part of the Kafka message so that -consumers can use the right schema for deserialization. The location of that global ID can be in the payload of -the message or in the message headers. The default approach is to pass the global ID in the message payload. If -you want the ID sent in the message headers instead, you can set the following configuration property: - -`props.putIfAbsent(AbstractKafkaSerDe.USE_HEADERS, "true")` - +.Global ID location +lizer is responsible for passing the unique global ID of the schema as part of the Kafka message so that consumers can use the right schema for deserialization. The location of that global ID can be in the payload of the message or in the message headers. The default approach is to pass the global ID in the message payload. If you want the ID sent in the message headers instead, you can set the following configuration property: +---- +props.putIfAbsent(AbstractKafkaSerDe.USE_HEADERS, "true") +---- The property name is `apicurio.registry.use.headers`. -==== *Global ID handler* - +.Global ID handler You can customize precisely how the global ID is encoded when passing it in the Kafka message body. Set the configuration property `apicurio.registry.id-handler` to be a class that implements the -`io.apicurio.registry.utils.serde.strategy.IdHandler` interface. 
{registry} provides two implementations of +`io.apicurio.registry.utils.serde.strategy.IdHandler` interface. {registry} provides two implementations of that interface: * `io.apicurio.registry.utils.serde.strategy.DefaultIdHandler` - stores the ID as an 8 byte long * `io.apicurio.registry.utils.serde.strategy.Legacy4ByteIdHandler` - stores the ID as an 4 byte int -{registry} represents the global ID of an artifact as a long, but for legacy reasons (or for compatibility with -other registries or serde classes) you may want to use 4 bytes when sending the ID. +{registry} represents the global ID of an artifact as a long, but for legacy reasons (or for compatibility with other registries or serde classes) you may want to use 4 bytes when sending the ID. -==== *Avro datum provider* +.Avro datum provider Avro provides different Datum writers and readers to write and read data. {registry} supports three different types: * Generic * Specific * Reflect -{registry}'s `AvroDatumProvider` is the abstraction on which type is then actually used, -where `DefaultAvroDatumProvider` is used by default. +The {registry} `AvroDatumProvider` is the abstraction on which type is then actually used, where `DefaultAvroDatumProvider` is used by default. 
There are two configuration options you can set: -* `apicurio.registry.avro-datum-provider` - provide a fully qualified Java classname of the `AvroDatumProvider` implementation, for example `io.apicurio.registry.utils.serde.avro.ReflectAvroDatumProvider` -* `apicurio.registry.use-specific-avro-reader` - true or false, to use Specific type when using `DefaultAvroDatumProvider` +* `apicurio.registry.avro-datum-provider` - provide a fully qualified Java class name of the `AvroDatumProvider` implementation, for example `io.apicurio.registry.utils.serde.avro.ReflectAvroDatumProvider` +* `apicurio.registry.use-specific-avro-reader` - true or false, to use specific type when using `DefaultAvroDatumProvider` -==== *Avro encoding* +.Avro encoding -When using Apache Avro to serializer data, it is common to use the Avro binary encoding format. This is so that -the data is encoded in as efficient a format as possible. However, Avro also supports encoding the data as JSON. -Encoding as JSON is useful because it is much easier to inspect the payload of each message, often for logging, -debugging, or other similar use cases. The {registry} Avro serializer can be configured to change the encoding -to JSON from the default (binary). +When using Apache Avro to serializer data, it is common to use the Avro binary encoding format. This is so that the data is encoded in as efficient a format as possible. However, Avro also supports encoding the data as JSON. Encoding as JSON is useful because it is much easier to inspect the payload of each message, often for logging, debugging, or other similar use cases. The {registry} Avro serializer can be configured to change the encoding to JSON from the default (binary). Set the Avro encoding to use by configuring the `apicurio.avro.encoding` property. The value must be either `JSON` or `BINARY`. - === Configuring the Avro deserializer You must configure the Avro deserializer class to match the configuration settings of the serializer. 
As a @@ -172,45 +155,46 @@ result, you can configure the Avro deserializer class in the following ways: See the serializer documentation for the above configuration options - the property names and values are the same. -NOTE: The following options are *not* needed when configuring the deserializer: +[NOTE] +==== +The following options are not needed when configuring the deserializer: * Artifact ID strategy * Global ID strategy * Global ID location +==== The reason these options are not necessary is that the deserializer class can figure this information out from -the message itself. In the case of the two strategies, they are not needed because the serializer is responsible -for sending the global ID of the schema as part of the message. And the location of that global ID is determined -(by the deserializer) by simply checking for the magic byte at the start of the message payload. If that byte is -found then the global ID is read from the message payload (using the configured handler). If the magic byte is -not found, then the global ID is read from the message headers. +the message itself. In the case of the two strategies, they are not needed because the serializer is responsible for sending the global ID of the schema as part of the message. + +The location of that global ID is determined by the deserializer by simply checking for the magic byte at the start of the message payload. If that byte is found, the global ID is read from the message payload using the configured handler. If the magic byte is not found, the global ID is read from the message headers. == Using JSON Schema SerDe with {registry} -{registry} provides serializer and deserializer classes for JSON Schema out of the box, to make using JSON Schema as -easy as possible. These classes are: +{registry} provides serializer and deserializer classes for JSON Schema to make using JSON Schema as easy as possible. 
These classes are: * `io.apicurio.registry.utils.serde.JsonSchemaKafkaSerializer` * `io.apicurio.registry.utils.serde.JsonSchemaKafkaDeserializer` Unlike Apache Avro, JSON Schema is not actually a serialization technology - it is instead a validation -technology. As a result, configuration options for JSON Schema are quite different. For example, there is no -*encoding* option, since data is always encoded as JSON. +technology. As a result, configuration options for JSON Schema are quite different. For example, there is no +encoding option, because data is always encoded as JSON. === Configuring the JSON Schema serializer You can configure the JSON Schema serializer class in the following ways: * {registry} location as a URL -* Artifact ID strategy (see the "Strategies to lookup a schema" section) -* Global ID strategy (see the "Strategies to lookup a schema" section) +* Artifact ID strategy +* Global ID strategy * Validation enabled/disabled As you can see, the only non-standard configuration property is whether JSON Schema validation is enabled or disabled. The validation feature is disabled by default but can be enabled by setting -`apicurio.registry.serdes.json-schema.validation-enabled` to `"true"`. For example: - -`props.putIfAbsent(JsonSchemaSerDeConstants.REGISTRY_JSON_SCHEMA_VALIDATION_ENABLED, "true")` +`apicurio.registry.serdes.json-schema.validation-enabled` to `"true"`. For example: +---- +props.putIfAbsent(JsonSchemaSerDeConstants.REGISTRY_JSON_SCHEMA_VALIDATION_ENABLED, "true") +---- === Configuring the JSON Schema deserializer @@ -219,17 +203,14 @@ You can configure the JSON Schema deserializer class in the following ways: * {registry} location as a URL * Validation enabled/disabled -As you can see, the deserializer is very simple to configure. You need to provide the location of {registry} so -that the schema can be loaded. The only other configuration is whether or not to perform validation. These +The deserializer is simple to configure. 
You must provide the location of {registry} so that the schema can be loaded. The only other configuration is whether or not to perform validation. These configuration properties are the same as for the serializer. -NOTE: Deserializer validation will only work if the serializer passes the global ID in the Kafka message, which -will only happen when validation is enabled in the serializer. +NOTE: Deserializer validation only works if the serializer passes the global ID in the Kafka message, which will only happen when validation is enabled in the serializer. == Using Protobuf SerDe with {registry} -{registry} provides serializer and deserializer classes for Google Protobuf out of the box, to make using Protobuf as -easy as possible. These classes are: +{registry} provides serializer and deserializer classes for Google Protobuf out of the box, to make using Protobuf as easy as possible. These classes are: * `io.apicurio.registry.utils.serde.ProtobufKafkaSerializer` * `io.apicurio.registry.utils.serde.ProtobufKafkaDeserializer` @@ -239,33 +220,33 @@ easy as possible. These classes are: You can configure the Protobuf serializer class in the following ways: * {registry} location as a URL -* Artifact ID strategy (see the "Strategies to lookup a schema" section) -* Global ID strategy (see the "Strategies to lookup a schema" section) +* Artifact ID strategy +* Global ID strategy * Global ID location * Global ID handler === Configuring the Protobuf deserializer -You must configure the Protobuf deserializer class to match the configuration settings of the serializer. As a -result, you can configure the Protobuf deserializer class in the following ways: +You must configure the Protobuf deserializer class to match the configuration settings of the serializer. 
As a result, you can configure the Protobuf deserializer class in the following ways: * {registry} location as a URL * Global ID handler -See the serializer documentation for the above configuration options - the property names and values are the same. +See the serializer documentation for these configuration options - the property names and values are the same. -NOTE: The following options are *not* needed when configuring the deserializer: +[NOTE] +==== +The following options are not needed when configuring the deserializer: * Artifact ID strategy * Global ID strategy * Global ID location +==== The reason these options are not necessary is that the deserializer class can figure this information out from -the message itself. In the case of the two strategies, they are not needed because the serializer is responsible -for sending the global ID of the schema as part of the message. And the location of that global ID is determined -(by the deserializer) by simply checking for the magic byte at the start of the message payload. If that byte is -found then the global ID is read from the message payload (using the configured handler). If the magic byte is -not found, then the global ID is read from the message headers. - -NOTE: the Protobuf deserializer doesn't deserialize to your exact Protobuf Message implementation, -but rather to a `DynamicMessage` instance (as there is no appropriate API to do otherwise). +the message itself. In the case of the two strategies, they are not needed because the serializer is responsible for sending the global ID of the schema as part of the message. + +The location of that global ID is determined (by the deserializer) by simply checking for the magic byte at the start of the message payload. If that byte is found, the global ID is read from the message payload (using the configured handler). If the magic byte is not found, the global ID is read from the message headers. 
+ +NOTE: The Protobuf deserializer does not deserialize to your exact Protobuf Message implementation, +but rather to a `DynamicMessage` instance (because there is no appropriate API to do otherwise). diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-storage.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-storage.adoc index d51e529319..202af8236b 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-storage.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-storage.adoc @@ -1,7 +1,7 @@ // Metadata created by nebel [id="registry-storage"] -= Storage options += {registry} storage options {registry} provides the following underlying storage implementations for registry artifacts: ifdef::apicurio-registry[] @@ -12,7 +12,7 @@ ifdef::apicurio-registry[] * Apache Kafka Streams * Embedded Infinispan cache -NOTE: The in-memory storage option is suitable for a development environment only. All data is lost when restarting this storage implementation. All other storage options are suitable for development and production environments. +NOTE: The in-memory storage option is suitable for a development environment only. All data is lost when restarting {registry} with this storage. The Kafka Streams storage option is recommended for production environments. 
endif::[] @@ -43,9 +43,6 @@ These features provide early access to upcoming product features, enabling custo endif::[] .Additional resources - -For details on how to install into your preferred storage option, see: - ifdef::apicurio-registry[] * {installing-the-registry-docker} endif::[] diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-web-console.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-web-console.adoc index a8a815bf61..2ec474592e 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-web-console.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-web-console.adoc @@ -1,13 +1,16 @@ // Metadata created by nebel [id="registry-web-console"] -= {registry} web console += Manage content using {registry} web console You can use the {registry} web console to browse and search the artifacts stored in the registry, and to upload new artifacts and artifact versions. You can search for artifacts by label, name, and description. You can also view an artifact’s content, view all of its available versions, or download an artifact file locally. -You can also use the {registry} web console to configure optional rules for registry content, both globally and for each artifact. These optional rules for content validation and compatibility are applied when new artifacts or artifact versions are uploaded to the registry. For more details, see {registry-rule-types} and {registry-rule-maturity-matrix}. +You can also use the {registry} web console to configure optional rules for registry content, both globally and for each artifact. These optional rules for content validation and compatibility are applied when new artifacts or artifact versions are uploaded to the registry. For more details, see {registry-reference}. 
.{registry} web console image::images/getting-started/registry-web-console.png[{registry} web console] -The {registry} web console is available from the main endpoint of your {registry} deployment, for example, on `\http://MY-REGISTRY-URL/ui`. For more details, see {managing-registry-artifacts-ui}. +The {registry} web console is available from the main endpoint of your {registry} deployment, for example, on `\http://MY-REGISTRY-URL/ui`. + +.Additional resources + * {managing-registry-artifacts-ui} diff --git a/docs/modules/ROOT/partials/getting-started/proc-configuring-registry-ui.adoc b/docs/modules/ROOT/partials/getting-started/proc-configuring-registry-ui.adoc index dba292adf9..eec08c129a 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-configuring-registry-ui.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-configuring-registry-ui.adoc @@ -1,36 +1,39 @@ [id="configuring-registry-ui"] -= Configuring {registry} web console += Configuring {registry} web console -You can configure the {registry} web console in a number of ways, either to customize its behavior or to properly -configure it for your deployment environment. +You can configure the {registry} web console specifically for your deployment environment or to customize its behavior. This section provides details on how to configure optional environment variables for the {registry} web console. -== Configuring {registry} web console for deployment environment +.Prerequisites +* You must have already installed {registry}. -When a user navigates their browser to the {registry} web console, some initial configuration settings are loaded. -Two important configuration properties are: +[discrete] +== Configuring the web console deployment environment -* URL of the back-end API -* URL of the front-end web console +When a user navigates their browser to the {registry} web console, some initial configuration settings are loaded. 
Two important configuration properties are: -* URL of the back-end API -* URL of the front-end web console -Typically {registry} will automatically detect and generate these settings, but there are some deployment environments -where this automatic detection can fail. When this happens, you can configure the following environment variables to -explicitly set them: +* URL for backend {registry} REST API +* URL for frontend {registry} web console -* *_REGISTRY_UI_CONFIG_APIURL_* : set to override the URL to the back-end API (example https://registry.my-domain.com/api) -* *_REGISTRY_UI_CONFIG_UIURL_* : set to override the URL to the front-end web console (example https://registry.my-domain.com/ui) +Typically, {registry} automatically detects and generates these settings, but there are some deployment environments where this automatic detection can fail. If this happens, you can configure environment variables to explicitly set these URLs for your environment. -== Configuring {registry} console for read-only mode +.Procedure +Configure the following environment variables to override the default URLs: -An optional feature that can be enabled in {registry} is the ability to put the web console into "Read Only" -mode. This mode disables all of the features in the web console that would allow a user to make changes to -registered artifacts. This includes (but is not limited to): +* `REGISTRY_UI_CONFIG_APIURL`: Set the URL for the backend {registry} REST API. For example, `\https://registry.my-domain.com/api` +* `REGISTRY_UI_CONFIG_UIURL`: Set the URL for the frontend {registry} web console. For example, `\https://registry.my-domain.com/ui` + +[discrete] +== Configuring the console in read-only mode + +You can configure the {registry} web console in read-only mode as an optional feature. This mode disables all features in the {registry} web console that allow users to make changes to registered artifacts. 
For example, this includes the following: * Creating an artifact * Uploading a new version of an artifact * Updating an artifact's metadata * Deleting an artifact -To put the web console into read only mode, set the following environment variable: +.Procedure +Configure the following environment variable to set the {registry} web console in read-only mode: -* *_REGISTRY_UI_FEATURES_READONLY_* : set to `true` to enable "Read Only" mode (default `false`) +* `REGISTRY_UI_FEATURES_READONLY`: Set to `true` to enable read-only mode. Defaults to `false`. diff --git a/docs/modules/ROOT/partials/getting-started/proc-managing-artifacts-using-maven-plugin.adoc b/docs/modules/ROOT/partials/getting-started/proc-managing-artifacts-using-maven-plugin.adoc index c17da1ddee..c450d3780a 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-managing-artifacts-using-maven-plugin.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-managing-artifacts-using-maven-plugin.adoc @@ -4,22 +4,16 @@ [id="managing-artifacts-using-maven-plugin"] = Managing artifacts using the {registry} Maven plug-in -{registry} provides a Maven plug-in to enable you to upload or download registry artifacts as part of your development build. For example, this plug-in is useful for testing and validating that your schema updates are compatible with client applications. - -.Prerequisites - -* See {registry-overview} -* {registry} must be installed and running in your environment -* Maven must be installed and configured in your environment +You can use the {registry} Maven plug-in to upload or download registry artifacts as part of your development build. For example, this plug-in is useful for testing and validating that your schema updates are compatible with client applications. +[discrete] == Registering an artifact using the Maven plug-in -Probably the most common use case for the Maven plug-in is registering artifacts during a build. 
You can accomplish -this by using the `register` goal provided. Simply update your Maven `pom.xml` file to use the -`apicurio-registry-maven-plugin` to upload an artifact to {registry}. - -The following example shows registering an Apache Avro schema artifact: +Probably the most common use case for the Maven plug-in is registering artifacts during a build. You can accomplish this by using the `register` execution goal. +.Procedure +* Update your Maven `pom.xml` file to use the `apicurio-registry-maven-plugin` to register an artifact. The following example shows registering an Apache Avro schema: ++ [source,xml] ---- @@ -43,16 +37,17 @@ The following example shows registering an Apache Avro schema artifact: ---- -<1> Specify `register` as the execution goal to upload an artifact to the registry. +<1> Specify `register` as the execution goal to upload the schema artifact to the registry. <2> You must specify the {registry} URL with the `/api` endpoint. <3> You can upload multiple artifacts using the artifact ID and location. +[discrete] == Downloading an artifact using the Maven plug-in -You can also use the Maven plug-in to download artifacts from {registry}. This is often useful, for example, when -generating code from a registered schema. - -The following example shows downloading a single schema by its artifact ID. +You can also use the Maven plug-in to download artifacts from {registry}. This is often useful, for example, when generating code from a registered schema. +.Procedure +* Update your Maven `pom.xml` file to use the `apicurio-registry-maven-plugin` to download an artifact. The following example shows downloading a single schema by its artifact ID. ++ [source,xml] ---- @@ -80,17 +75,17 @@ The following example shows downloading a single schema by its artifact ID. <1> Specify `download` as the execution goal. <2> You must specify the {registry} URL with the `/api` endpoint. 
<3> You can download multiple artifacts to a specified directory using the artifact ID. -<4> The plug-in will automatically try to select an appropriate file extension, but you can override it using ``. - -== Testing an artifact -You may want to simply verify that an artifact can be registered without actually making any changes. This is most -often useful when rules have been configured in {registry}. Testing the artifact will result in a failure if the -artifact content violates any of the configured rules. +<4> The plug-in automatically tries to select an appropriate file extension, but you can override it using ``. -NOTE: Even if the artifact passes the test, no content will be added to {registry}. +[discrete] +== Testing an artifact using the Maven plug-in +You might want to verify that an artifact can be registered without actually making any changes. This is most often useful when rules are configured in {registry}. Testing the artifact results in a failure if the artifact content violates any of the configured rules. -The following example shows testing an Apache Avro schema artifact: +NOTE: Even if the artifact passes the test, no content is added to {registry}. +.Procedure +* Update your Maven `pom.xml` file to use the `apicurio-registry-maven-plugin` to test an artifact. The following example shows testing an Apache Avro schema: ++ [source,xml] ---- @@ -114,9 +109,9 @@ The following example shows testing an Apache Avro schema artifact: ---- -<1> Specify `test-update` as the execution goal to test an artifact. +<1> Specify `test-update` as the execution goal to test the schema artifact. <2> You must specify the {registry} URL with the `/api` endpoint. <3> You can test multiple artifacts using the artifact ID and location. .Additional resources - * For more details on the Maven plug-in, see https://github.com/Apicurio/apicurio-registry-demo. 
+ * For more details on the {registry} Maven plug-in, see the link:https://github.com/Apicurio/apicurio-registry-demo[Registry demonstration example] diff --git a/docs/modules/ROOT/partials/getting-started/proc-writing-registry-client.adoc b/docs/modules/ROOT/partials/getting-started/proc-writing-registry-client.adoc index 4254a03c55..8f12b5f6c2 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-writing-registry-client.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-writing-registry-client.adoc @@ -2,15 +2,17 @@ // ParentAssemblies: assemblies/getting-started/as_installing-the-registry.adoc [id="writing-registry-client"] -= Writing a {registry} client application += Writing {registry} client applications -This section explains how to manage artifacts stored in {registry} using a Java client application. +This section explains how to manage artifacts stored in {registry} using a Java client application. The {registry} Java client extends the `AutoCloseable` interface. .Prerequisites * See {registry-overview} -* {registry} must be installed and running in your environment. -* Add the following dependency to your Maven project: +* {registry} must be installed and running in your environment +.Procedure +. Add the following dependency to your Maven project: ++ [source,xml,subs="+quotes,attributes"] ---- @@ -20,8 +22,7 @@ This section explains how to manage artifacts stored in {registry} using a Java ---- -.Procedure -. Create a registry client +. 
Create a registry client as follows: + [source,java,subs="+quotes,attributes"] ---- @@ -30,22 +31,19 @@ public class ClientExample { private static final RegistryRestClient client; public static void main(String[] args) throws Exception { - // Create a Service Registry client - String registryUrl = "http://localhost:8080/api/"; - RegistryRestClient client = RegistryRestClientFactory.create(registryUrl); <1> + // Create a registry client + String registryUrl = "https://registry.my-domain.com/api"; <1> + RegistryRestClient client = RegistryRestClientFactory.create(registryUrl); <2> } } ---- -<1> For more options on how to create a {registry} client, see {registry-client-types}. - -. Once created, all the operations from the {registry} REST API are available through the client. For details about the available -operations, see the REST API documentation. - +<1> You must specify the {registry} URL with the `/api` endpoint. +<2> For more options when creating a {registry} client, see the Java client configuration in the next section. -The {registry} Java client extends the interface Autocloseable. +. When the client is created, you can use all the operations from the {registry} REST API through the client. For more details, see the link:{attachmentsdir}/registry-rest-api.htm[Apicurio Registry REST API documentation]. .Additional resources -* For more examples on how to use or customize the {registry} client see https://github.com/Apicurio/apicurio-registry-examples/blob/master/rest-client +* For an example of how to use and customize the {registry} client, see the https://github.com/Apicurio/apicurio-registry-examples/blob/master/rest-client[Registry client demonstration example]. 
ifdef::rh-service-registry[] * For details on how to use the {registry} Kafka client serializer/deserializer for Apache Avro in AMQ Streams producer and consumer applications, see diff --git a/docs/modules/ROOT/partials/getting-started/ref-registry-artifact-metadata.adoc b/docs/modules/ROOT/partials/getting-started/ref-registry-artifact-metadata.adoc new file mode 100644 index 0000000000..037b7a302d --- /dev/null +++ b/docs/modules/ROOT/partials/getting-started/ref-registry-artifact-metadata.adoc @@ -0,0 +1,58 @@ +// Metadata created by nebel + +[id="registry-artifact-metadata"] += {registry} artifact metadata + +When an artifact is added to {registry}, a set of metadata properties is stored along with the artifact content. This metadata consists of a set of generated read-only properties, along with some properties that you can set. + +.{registry} metadata properties +[%header,cols=3*] +|=== +|Property +|Type +|Editable +|`id` +a| string +a| false +|`type` +a| ArtifactType +a| false +|`state` +a| ArtifactState +a| true +|`version` +a| integer +a| false +|`createdBy` +a| string +a| false +|`createdOn` +a| date +a| false +|`modifiedBy` +a| string +a| false +|`modifiedOn` +a| date +a| false +|`name` +a| string +a| true +|`description` +a| string +a| true +|`labels` +a| array of string +a| true +|`properties` +a| map +a| true +|=== + +.Updating artifact metadata +* You can use the {registry} REST API to update the set of editable properties using the metadata endpoints. + +* You can edit the `state` property only by using the state transition API. For example, you can mark an artifact as `deprecated` or `disabled`. + +.Additional resources +For more details, see the `/artifacts/{artifactId}/meta` sections in the link:{attachmentsdir}/registry-rest-api.htm[Apicurio Registry REST API documentation]. 
diff --git a/docs/modules/ROOT/partials/getting-started/ref-registry-client.adoc b/docs/modules/ROOT/partials/getting-started/ref-registry-client.adoc index 4109ffc7f7..00be8323aa 100644 --- a/docs/modules/ROOT/partials/getting-started/ref-registry-client.adoc +++ b/docs/modules/ROOT/partials/getting-started/ref-registry-client.adoc @@ -1,27 +1,39 @@ // Metadata created by nebel // ParentAssemblies: assemblies/getting-started/assembly-using-the-registry-client.adoc -[id="registry-client-types"] -= {registry} Java client reference -The {registry} Java client includes the following configuration options, based on the client factory. +[id="registry-client-config"] += {registry} Java client configuration +The {registry} Java client includes the following configuration options, based on the client factory: -.{registry} Java client options -[%header,cols=3*] +.{registry} Java client configuration options +[%header,cols="1,2,1"] |=== |Option |Description |Arguments -|Plain Client +|Plain client |Basic REST client used to interact with a running registry. -|baseUrl +|`baseUrl` |Custom HTTP client |Registry client using an OkHttpClient provided by the user. -|baseUrl, okhttpClient -|Custom Configuration +|`baseUrl`, `okhttpClient` +|Custom configuration |Registry client that accepts a map containing custom configuration. This is useful, for example, to add custom headers to the calls. -|baseUrl, Map configs +|`baseUrl`, `Map configs` |=== +[discrete] +== Custom header configuration +To configure custom headers, you must add the `apicurio.registry.request.headers` prefix to the `configs` map key. For example, a key of `apicurio.registry.request.headers.Authorization` with a value of `Basic: xxxxx` results in a header of `Authorization` with value of `Basic: xxxxx`. 
-In order to configure custom headers, the prefix *apicurio.registry.request.headers* must be added to the configs map key, for example, a key *apicurio.registry.request.headers.Authorization* with value Basic: xxxxx would result in a header of *Authorization* with value Basic: xxxxx. +[discrete] +== TLS configuration +You can configure Transport Layer Security (TLS) authentication for the {registry} Java client using the following properties: +* `apicurio.registry.request.ssl.truststore.location` +* `apicurio.registry.request.ssl.truststore.password` +* `apicurio.registry.request.ssl.truststore.type` +* `apicurio.registry.request.ssl.keystore.location` +* `apicurio.registry.request.ssl.keystore.password` +* `apicurio.registry.request.ssl.keystore.type` +* `apicurio.registry.request.ssl.key.password` diff --git a/docs/modules/ROOT/partials/getting-started/ref-registry-rule-maturity-matrix.adoc b/docs/modules/ROOT/partials/getting-started/ref-registry-rule-maturity-matrix.adoc index bf40fe2055..6885335991 100644 --- a/docs/modules/ROOT/partials/getting-started/ref-registry-rule-maturity-matrix.adoc +++ b/docs/modules/ROOT/partials/getting-started/ref-registry-rule-maturity-matrix.adoc @@ -2,18 +2,17 @@ // ParentAssemblies: assemblies/getting-started/as_registry-reference.adoc [id="registry-rule-maturity-matrix"] -= {registry} content rule maturity matrix += {registry} content rule maturity -Not all rules are fully implemented for every artifact type supported by the registry. -The following table documents the current maturity level for each rule and each -artifact type. +Not all content rules are fully implemented for every artifact type supported by {registry}. +The following table shows the current maturity level for each rule and artifact type. 
.{registry} content rule maturity matrix [%header,cols=3*] |=== -|Artifact Type -|Validity Rule -|Compatibility Rule +|Artifact type +|Validity rule +|Compatibility rule |*Avro* a| Full a| Full From c87d0263ad9f4ef993e3a70424b2a246e6582ff9 Mon Sep 17 00:00:00 2001 From: Stephen McCarthy <29098561+smccarthy-ie@users.noreply.github.com> Date: Tue, 17 Nov 2020 10:02:31 +0000 Subject: [PATCH 18/31] add revew feedback and tidy up (#1013) --- docs/getting-started/master.adoc | 4 +- docs/local-test-playbook.yml | 7 ++-- .../assembly-configuring-the-registry.adoc | 2 +- ...ssembly-installing-registry-openshift.adoc | 2 +- .../assembly-intro-to-registry-rules.adoc | 6 +-- .../assembly-intro-to-the-registry.adoc | 4 +- .../assembly-using-kafka-client-serdes.adoc | 4 +- .../con-kafka-connect-converters.adoc | 4 +- .../con-registry-artifacts.adoc | 2 +- .../getting-started/con-registry-demo.adoc | 4 -- .../getting-started/con-registry-distros.adoc | 8 ++-- .../getting-started/con-registry-rules.adoc | 2 +- .../con-registry-serdes-concepts.adoc | 4 +- .../con-registry-serdes-constants.adoc | 2 +- .../con-registry-serdes-strategy.adoc | 24 ++++++------ .../con-registry-serdes-types.adoc | 4 +- .../getting-started/con-registry-storage.adoc | 1 + .../proc-configuring-registry-ui.adoc | 2 +- .../proc-installing-registry-operatorhub.adoc | 4 +- .../proc-registry-serdes-config-consumer.adoc | 19 ++++------ .../proc-registry-serdes-config-producer.adoc | 23 +++++------- .../proc-registry-serdes-config-stream.adoc | 10 ++--- .../proc-registry-serdes-register.adoc | 6 +-- ...proc-setting-up-kafka-streams-storage.adoc | 9 +++-- .../ref-registry-artifact-metadata.adoc | 2 +- .../partials/shared/attributes-links.adoc | 17 +++++++-- .../ROOT/partials/shared/attributes.adoc | 37 ++++++++++--------- 27 files changed, 106 insertions(+), 107 deletions(-) diff --git a/docs/getting-started/master.adoc b/docs/getting-started/master.adoc index c527cec638..777b0eaaac 100644 --- 
a/docs/getting-started/master.adoc +++ b/docs/getting-started/master.adoc @@ -19,9 +19,9 @@ include::attributes.adoc[] include::assemblies/getting-started/assembly-intro-to-the-registry.adoc[leveloffset=+1] include::assemblies/getting-started/assembly-intro-to-registry-rules.adoc[leveloffset=+1] ifdef::apicurio-registry[] -include::assemblies/getting-started/assembly-installing-the-registry-docker.adoc[leveloffset=+1] +include::assemblies/getting-started/assembly-installing-registry-docker.adoc[leveloffset=+1] endif::[] -include::assemblies/getting-started/assembly-installing-the-registry-openshift.adoc[leveloffset=+1] +include::assemblies/getting-started/assembly-installing-registry-openshift.adoc[leveloffset=+1] include::assemblies/getting-started/assembly-configuring-the-registry.adoc[leveloffset=+1] include::assemblies/getting-started/assembly-managing-registry-artifacts-ui.adoc[leveloffset=+1] include::assemblies/getting-started/assembly-managing-registry-artifacts-api.adoc[leveloffset=+1] diff --git a/docs/local-test-playbook.yml b/docs/local-test-playbook.yml index 00a872b36d..398fd30b01 100644 --- a/docs/local-test-playbook.yml +++ b/docs/local-test-playbook.yml @@ -27,9 +27,10 @@ asciidoc: mod-loc: partial$ registry-overview: link:assembly-intro-to-the-registry.html[Introduction to Apicurio Registry] registry-rules: link:assembly-intro-to-registry-rules.html[Introduction to Apicurio Registry rules] - registry-reference: link:assembly-registry-reference.html[Apicurio Registry artifact reference] + registry-reference: link:assembly-registry-reference.html[Apicurio Registry artifact reference] + installing-the-registry-docker: link:assembly-installing-registry-docker.html[Installing Apicurio Registry using Docker] + installing-the-registry-openshift: link:assembly-installing-registry-openshift.html[Installing Apicurio Registry on OpenShift] + installing-the-registry-storage-openshift: link:assembly-installing-registry-storage-openshift.html[Installing 
Apicurio Registry storage on OpenShift] managing-registry-artifacts-ui: link:assembly-managing-registry-artifacts-ui.html[Managing Apicurio Registry content using the web console] managing-registry-artifacts-api: link:assembly-managing-registry-artifacts-api.html[Managing Apicurio Registry content using the REST API] - installing-the-registry-docker: link:assembly-installing-the-registry-docker.html[Installing Apicurio Registry using Docker] - installing-the-registry-openshift: link:assembly-installing-the-registry-openshift.html[Installing Apicurio Registry on OpenShift] kafka-client-serdes: link:assembly-using-kafka-client-serdes.html[Validating schemas using Kafka client serializers/deserializers] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-configuring-the-registry.adoc b/docs/modules/ROOT/pages/getting-started/assembly-configuring-the-registry.adoc index 524b8cd34b..7279cb5763 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-configuring-the-registry.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-configuring-the-registry.adoc @@ -2,7 +2,7 @@ include::{mod-loc}shared/all-attributes.adoc[] [id="configuring-the-registry"] -= Managing {registry} deployment on OpenShift += Configuring {registry} deployment on OpenShift This chapter explains how to configure optional settings for {registry} health checks on OpenShift: diff --git a/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc index a6b901af1f..1bee9b27a6 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc @@ -13,7 +13,7 @@ This chapter explains how to install {registry}: .Prerequisites * {registry-overview} -NOTE: You can install more than one instance of {registry} depending on your environment. 
The number of instances depends on the number and type of artifacts stored in {registry}, and on your chosen storage option, for example, Kafka Streams, database, Infinispan cluster configuration. +NOTE: You can install more than one instance of {registry} depending on your environment. The number of instances depends on the number and type of artifacts stored in {registry} and on your chosen storage option. ifdef::apicurio-registry[] .Additional resources diff --git a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc index 09f505ad5f..3a2701cc7a 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc @@ -1,16 +1,16 @@ // Metadata created by nebel - include::{mod-loc}shared/all-attributes.adoc[] [id="intro-to-registry-rules"] = {registry} content rules //If the assembly covers a task, start the title with a verb in the gerund form, such as Creating or Configuring. 
-This chapter introduces the optional rules used to govern registry content and provides details on the available rule types: +This chapter introduces the optional rules used to govern registry content and provides details on the available rule configuration: * xref:registry-rules[] * xref:registry-rules-apply[] -* xref:registry-rules-work[] +* xref:registry-rules-work[] +* xref:registry-rules-config[] //INCLUDES include::{mod-loc}getting-started/con-registry-rules.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc index 2b0a65a0e7..30c1230ee9 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc @@ -10,8 +10,8 @@ This chapter introduces {registry} concepts and features and provides details on * xref:registry-overview[] * xref:registry-artifacts[] -* xref:registry-web-console[] * xref:registry-storage[] +* xref:registry-web-console[] * xref:client-serde[] * xref:kafka-connect[] * xref:registry-demo[] @@ -20,8 +20,8 @@ This chapter introduces {registry} concepts and features and provides details on //INCLUDES include::{mod-loc}getting-started/con-registry-overview.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-registry-artifacts.adoc[leveloffset=+1] -include::{mod-loc}getting-started/con-registry-web-console.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-registry-storage.adoc[leveloffset=+1] +include::{mod-loc}getting-started/con-registry-web-console.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-registry-serde.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-kafka-connect-converters.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-registry-demo.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc 
b/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc index 361721ae07..991d78da82 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc @@ -11,9 +11,9 @@ include::{mod-loc}shared/all-attributes.adoc[] This chapter provides instructions on how to use the Kafka client serializers and deserializers for Apache Avro, JSON Schema, and Google Protobuf in your Kafka producer and consumer client applications: * xref:registry-serdes-concepts-serde-{context}[] -* xref:registry-serdes-types-serde-{context}[] * xref:registry-serdes-concepts-strategy-{context}[] * xref:registry-serdes-concepts-constants-{context}[] +* xref:registry-serdes-types-serde-{context}[] * xref:registry-serdes-register-{context}[] * xref:registry-serdes-config-consumer-{context}[] * xref:registry-serdes-config-producer-{context}[] @@ -31,9 +31,9 @@ endif::[] //INCLUDES include::{mod-loc}getting-started/con-registry-serdes-concepts.adoc[leveloffset=+1] -include::{mod-loc}getting-started/con-registry-serdes-types.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-registry-serdes-strategy.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-registry-serdes-constants.adoc[leveloffset=+1] +include::{mod-loc}getting-started/con-registry-serdes-types.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-registry-serdes-register.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-registry-serdes-config-consumer.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-registry-serdes-config-producer.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/partials/getting-started/con-kafka-connect-converters.adoc b/docs/modules/ROOT/partials/getting-started/con-kafka-connect-converters.adoc index 3b64dcc06f..17a840ef98 100644 --- a/docs/modules/ROOT/partials/getting-started/con-kafka-connect-converters.adoc +++ 
b/docs/modules/ROOT/partials/getting-started/con-kafka-connect-converters.adoc @@ -2,7 +2,7 @@ [id="kafka-connect"] -= Stream data to external sytems with Kafka Connect converters += Stream data to external systems with Kafka Connect converters You can use {registry} with Apache Kafka Connect to stream data between Kafka and external systems. Using Kafka Connect, you can define connectors for different systems to move large volumes of data into and out of Kafka-based systems. .{registry} and Kafka Connect architecture @@ -27,4 +27,4 @@ ifdef::rh-service-registry[] * link:{LinkDebeziumUserGuide}#avro-serialization[Avro serialization in Debezium User Guide] * link:{LinkCamelKafkaConnectorGetStart}[{NameCamelKafkaConnectorGetStart}] endif::[] -* link:https://debezium.io/blog/2020/04/09/using-debezium-wit-apicurio-api-schema-registry/[Demonstration of using Kakfa Connect with Debezium and Apicurio Registry] +* link:https://debezium.io/blog/2020/04/09/using-debezium-wit-apicurio-api-schema-registry/[Demonstration of using Kafka Connect with Debezium and Apicurio Registry] diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc index b03d211588..3d7d0a4e32 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc @@ -1,7 +1,7 @@ // Metadata created by nebel [id="registry-artifacts"] -= Store schema and API artifacts in {registry} += Schema and API artifacts in {registry} The items stored in {registry}, such as event schemas and API specifications, are known as registry _artifacts_. 
The following shows an example of an Apache Avro schema artifact in JSON format for a simple share price application: diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-demo.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-demo.adoc index 355d9e17b6..56926d2839 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-demo.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-demo.adoc @@ -15,7 +15,3 @@ * REST client example For more details, see link:https://github.com/Apicurio/apicurio-registry-examples[] - -ifdef::rh-service-registry[] -For another open source demonstration example with detailed instructions on Avro serialization/deserialization with storage in Apache Kafka, see the Red Hat Developer article on link:https://developers.redhat.com/blog/2019/12/16/getting-started-with-red-hat-integration-service-registry/[Getting Started with Red Hat Integration Service Registry]. -endif::[] diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-distros.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-distros.adoc index c2dbfac78c..69c22ee5e2 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-distros.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-distros.adoc @@ -72,16 +72,16 @@ endif::[] |Location |Release |Example custom resource definitions for installation -|link:{download-url-registry-custom-resources}[Software Downloads for Red Hat Integration] +|link:{download-url-registry-distribution}[Software Downloads for Red Hat Integration] |General Availability and Technical Preview |Kafka Connect converters -|link:{download-url-registry-kafka-connect}[Software Downloads for Red Hat Integration] +|link:{download-url-registry-distribution}[Software Downloads for Red Hat Integration] |General Availability |Maven repository -|link:{download-url-registry-maven}[Software Downloads for Red Hat Integration] 
+|link:{download-url-registry-distribution}[Software Downloads for Red Hat Integration] |General Availability |Source code -|link:{download-url-registry-source-code}[Software Downloads for Red Hat Integration] +|link:{download-url-registry-distribution}[Software Downloads for Red Hat Integration] |General Availability |=== diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-rules.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-rules.adoc index 3d641f0e75..909bde5917 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-rules.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-rules.adoc @@ -36,7 +36,7 @@ For more details, see {registry-reference}. [id="registry-rules-config"] = Content rule configuration -You can configure rules individually for each artifact, as well as globally. {registry} applies the rules configured for the specific artifact. But if no rules are configured at that level, {registry} applies the globally configured rules. If no global rules are configured, no rules are applied. +You can configure rules individually for each artifact, as well as globally. {registry} applies the rules configured for the specific artifact. If no rules are configured at that level, {registry} applies the globally configured rules. If no global rules are configured, no rules are applied. 
[discrete] == Configure artifact rules diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc index 0a0ae09bbc..46c6b93b9a 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc @@ -40,8 +40,8 @@ A consumer client application uses a deserializer to get the messages that it co To enable a consumer to use {registry} for deserialization: -* xref:service-registry-register-{context}[Define and register your schema with {registry}] -* xref:service-registry-config-consumer-{context}[Configure the consumer client code]: +* xref:registry-serdes-register-{context}[Define and register your schema with {registry}] +* xref:registry-serdes-config-consumer-{context}[Configure the consumer client code]: ** URL of {registry} ** {registry} deserializer to use with the messages ** Input data stream for deserialization diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-constants.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-constants.adoc index 710a26b0c2..8f4454f298 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-constants.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-constants.adoc @@ -6,7 +6,7 @@ You can configure specific client serializer/deserializer (SerDe) services and schema lookup strategies directly into a client using the constants outlined in this section. -Alternatively, you can use specify the constants in a properties file, or a properties instance. +Alternatively, you can specify the constants in a properties file, or a properties instance. 
[discrete] == Constants for serializer/deserializer services diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc index c938836e86..50fcb3b072 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc @@ -2,7 +2,7 @@ // assembly-using-kafka-client-serdes [id='registry-serdes-concepts-strategy-{context}'] -= Strategies to lookup a schema += Strategies to look up a schema The Kafka client serializer uses two lookup _strategies_ to determine the artifact ID and global ID under which the message schema is registered in {registry}. @@ -18,6 +18,17 @@ The artifact ID strategy provides a way to map the Kafka topic and message infor However, you can use alternative conventions for the mapping, either by using an alternative strategy provided by {registry} or by creating a custom Java class that implements `io.apicurio.registry.utils.serde.strategy.ArtifactIdStrategy`. +[discrete] +[id='service-registry-concepts-artifactid-{context}'] +== Strategies to return an artifact ID + +Strategies to return an artifact ID based on an implementation of `ArtifactIdStrategy`: + +`RecordIdStrategy`:: Avro-specific strategy that uses the full name of the schema. +`TopicRecordIdStrategy`:: Avro-specific strategy that uses the topic name and the full name of the schema. +`TopicIdStrategy`:: (Default) strategy that uses the topic name and `key` or `value` suffix. +`SimpleTopicIdStrategy`:: Simple strategy that only uses the topic name. + [discrete] == Global ID strategy @@ -44,17 +55,6 @@ public String artifactId(String topic, boolean isKey, T schema) { What lookup strategy you use depends on how and where you store your schema. For example, you might use a strategy that uses a _record ID_ if you have different Kafka topics with the same Avro message type. 
-[discrete] -[id='service-registry-concepts-artifactid-{context}'] -== Strategies to return an artifact ID - -Strategies to return an artifact ID based on an implementation of `ArtifactIdStrategy`: - -`RecordIdStrategy`:: Avro-specific strategy that uses the full name of the schema. -`TopicRecordIdStrategy`:: Avro-specific strategy that uses the topic name and the full name of the schema. -`TopicIdStrategy`:: (Default) strategy that uses the topic name and `key` or `value` suffix. -`SimpleTopicIdStrategy`:: Simple strategy that only uses the topic name. - [discrete] [id='service-registry-concepts-globalid-{context}'] == Strategies to return a global ID diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc index ca49069878..2b21536313 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc @@ -104,7 +104,7 @@ You can configure the Avro serializer class in the following ways: * Avro encoding .Global ID location -lizer is responsible for passing the unique global ID of the schema as part of the Kafka message so that consumers can use the right schema for deserialization. The location of that global ID can be in the payload of the message or in the message headers. The default approach is to pass the global ID in the message payload. If you want the ID sent in the message headers instead, you can set the following configuration property: +The serializer passes the unique global ID of the schema as part of the Kafka message so that consumers can use the right schema for deserialization. The location of that global ID can be in the payload of the message or in the message headers. The default approach is to pass the global ID in the message payload. 
If you want the ID sent in the message headers instead, you can set the following configuration property: ---- props.putIfAbsent(AbstractKafkaSerDe.USE_HEADERS, "true") ---- @@ -123,7 +123,7 @@ that interface: {registry} represents the global ID of an artifact as a long, but for legacy reasons (or for compatibility with other registries or serde classes) you may want to use 4 bytes when sending the ID. .Avro datum provider -Avro provides different Datum writers and readers to write and read data. {registry} supports three different types: +Avro provides different datum writers and readers to write and read data. {registry} supports three different types: * Generic * Specific diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-storage.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-storage.adoc index 202af8236b..acbb1ac7ad 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-storage.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-storage.adoc @@ -47,3 +47,4 @@ ifdef::apicurio-registry[] * {installing-the-registry-docker} endif::[] * {installing-the-registry-openshift} +* {installing-the-registry-storage-openshift} diff --git a/docs/modules/ROOT/partials/getting-started/proc-configuring-registry-ui.adoc b/docs/modules/ROOT/partials/getting-started/proc-configuring-registry-ui.adoc index eec08c129a..1a356e6437 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-configuring-registry-ui.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-configuring-registry-ui.adoc @@ -1,6 +1,6 @@ [id="configuring-registry-ui"] -= Configuring {registry} web console += Configuring the {registry} web console You can configure the {registry} web console specifically for your deployment environment or to customize its behavior. This section provides details on how to configure optional environment variables for the {registry} web console. 
diff --git a/docs/modules/ROOT/partials/getting-started/proc-installing-registry-operatorhub.adoc b/docs/modules/ROOT/partials/getting-started/proc-installing-registry-operatorhub.adoc index 747f811dd1..6e0f7f3b39 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-installing-registry-operatorhub.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-installing-registry-operatorhub.adoc @@ -33,7 +33,9 @@ endif::[] . Select your subscription settings, for example: ** *Installation Mode* > *A specific namespace on the cluster* > *my-project* ifdef::rh-service-registry[] -** *Update Channel* > *serviceregistry-1.0* +** *Update Channel* > Select one of the following channels: +*** *serviceregistry-1*: All minor and patch updates, such as version 1.1.0 and 1.0.1. For example, a {registry} installation on version 1.0.x automatically upgrades to 1.1.x releases. +*** *serviceregistry-1.0*: Patch updates only, such as version 1.0.1 and 1.0.2. For example, a {registry} installation on version 1.0.x automatically ignores any 1.1.x releases. endif::[] ifdef::apicurio-registry[] ** *Update Channel* > *alpha* diff --git a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc index 19fb7402a3..0ae08a6257 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc @@ -13,20 +13,16 @@ This procedure describes how to configure a Kafka consumer client written in Jav .Procedure -. Configure the client with the URL of {registry}. -+ -For example: +. Configure the client with the URL of {registry}. 
For example: + [source,shell,subs="+quotes,attributes"] ---- String registryUrl = "https://registry.example.com/api"; Properties props = new Properties(); -props.putIfAbsent(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, registryUrl); <1> +props.putIfAbsent(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, registryUrl); ---- -. Configure the client with the {registry} deserializer. -+ -For example: +. Configure the client with the {registry} deserializer. For example: + [source,java,subs="+quotes,attributes"] ---- @@ -37,10 +33,9 @@ props.putIfAbsent(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true"); props.putIfAbsent(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000"); props.putIfAbsent(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); props.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, - AvroKafkaDeserializer.class.getName()); <2> <3> + AvroKafkaDeserializer.class.getName()); <1> props.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, - AvroKafkaDeserializer.class.getName()); <2> <3> + AvroKafkaDeserializer.class.getName()); <2> ---- -<1> The {registry} URL. -<2> The deserializer provided by {registry}. -<3> The deserialization is in _Apache Avro_ JSON format. +<1> The deserializer provided by {registry}. +<2> The deserialization is in Apache Avro JSON format. diff --git a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc index e82fb0237b..bfcdf27345 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc @@ -13,29 +13,24 @@ This procedure describes how to configure a Kafka producer client written in Jav .Procedure -. Configure the client with the URL of {registry}. -+ -For example: +. Configure the client with the URL of {registry}. 
For example: + [source,java,subs="+quotes,attributes"] ---- String registryUrl = "https://registry.example.com/api"; Properties props = new Properties(); -props.putIfAbsent(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, registryUrl); <1> +props.putIfAbsent(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, registryUrl); ---- -. Configure the client with the serializer, and the and the strategy to look up the schema in {registry}. -+ -For example: +. Configure the client with the serializer, and the strategy to look up the schema in {registry}. For example: + [source,java,subs="+quotes,attributes"] ---- props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "my-cluster-kafka-bootstrap:9092"); -props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, AvroKafkaSerializer.class.getName()); <2> -props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, AvroKafkaSerializer.class.getName()); <3> -props.put(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, FindLatestIdStrategy.class.getName()); <4> +props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, AvroKafkaSerializer.class.getName()); <1> +props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, AvroKafkaSerializer.class.getName()); <2> +props.put(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, FindLatestIdStrategy.class.getName()); <3> ---- -<1> The {registry} URL. -<2> The serializer for the message _key_ provided by {registry}. -<3> The serializer for the message _value_ provided by {registry}. -<4> Lookup strategy to find the global ID for the schema. +<1> The serializer for the message key provided by {registry}. +<2> The serializer for the message value provided by {registry}. +<3> Lookup strategy to find the global ID for the schema. 
diff --git a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-stream.adoc b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-stream.adoc index 8c433f1267..a1c1d66fd3 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-stream.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-stream.adoc @@ -13,9 +13,7 @@ This procedure describes how to configure a Kafka Streams client written in Java .Procedure -. Create and configure a REST client with the {registry} -+ -For example: +. Create and configure a REST client with the {registry}. For example: + [source,shell,subs="+quotes,attributes"] ---- @@ -23,9 +21,7 @@ String registryUrl = "https://registry.example.com/api"; RegistryService client = RegistryClient.cached(registryUrl); ---- -. Configure the serializer, deserializer, and create the Kafka Streams client -+ -For example: +. Configure the serializer, deserializer, and create the Kafka Streams client. For example: + [source,java,subs="+quotes,attributes"] ---- @@ -48,5 +44,5 @@ KStream input = builder.stream( <4> ---- <1> The serializer provided by {registry}. <2> The deserializer provided by {registry}. -<3> The deserialization is in _Apache Avro_ format. +<3> The deserialization is in Apache Avro format. <4> The Kafka Streams client application. diff --git a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-register.adoc b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-register.adoc index f786b7c776..6e59dc1fa8 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-register.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-register.adoc @@ -4,7 +4,7 @@ [id='registry-serdes-register-{context}'] = Registering a schema in {registry} -After you have defined a schema in the appropriate format, such as _Apache Avro_, you can add the schema to {registry}. 
+After you have defined a schema in the appropriate format, such as Apache Avro, you can add the schema to {registry}. You can add the schema using: @@ -45,8 +45,8 @@ curl -X POST -H "Content-type: application/json; artifactType=AVRO" \ }' https://my-cluster-service-registry-myproject.example.com/api/artifacts -s <2> ---- -<1> Avro schema -<2> OpenShift route name that exposes {registry} +<1> Avro schema artifact. +<2> OpenShift route name that exposes {registry}. [discrete] diff --git a/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc b/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc index 56f3def9c4..9e18cc1c69 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc @@ -93,11 +93,12 @@ necessary to do this when sharing the Kafka cluster with other applications that Change the default topic names by overriding them either by setting appropriate environment variables or by setting appropriate Java system properties: -[%header,cols=3*] +.Environment variables for Kafka topic names +[%header,cols="1,2,2"] |=== -|Topic Default -|Environment Variable -|System Property +|Topic default +|Environment variable +|System property |`storage-topic` a| `REGISTRY_STREAMS_TOPOLOGY_STORAGE_TOPIC` a| `registry.streams.topology.storage.topic` diff --git a/docs/modules/ROOT/partials/getting-started/ref-registry-artifact-metadata.adoc b/docs/modules/ROOT/partials/getting-started/ref-registry-artifact-metadata.adoc index 037b7a302d..9ea0a721f7 100644 --- a/docs/modules/ROOT/partials/getting-started/ref-registry-artifact-metadata.adoc +++ b/docs/modules/ROOT/partials/getting-started/ref-registry-artifact-metadata.adoc @@ -50,7 +50,7 @@ a| true |=== .Updating artifact metadata -* You can use the {registry} REST API update the set of editable properties using the metadata endpoints. 
+* You can use the {registry} REST API to update the set of editable properties using the metadata endpoints. * You can edit the `state` property only by using the state transition API. For example, you can mark an artifact as `deprecated` or `disabled`. diff --git a/docs/modules/ROOT/partials/shared/attributes-links.adoc b/docs/modules/ROOT/partials/shared/attributes-links.adoc index 0951184d54..c1e4851362 100644 --- a/docs/modules/ROOT/partials/shared/attributes-links.adoc +++ b/docs/modules/ROOT/partials/shared/attributes-links.adoc @@ -1,3 +1,12 @@ +:LinkRedHatIntegrationDownloads: https://access.redhat.com/jbossnetwork/restricted/listSoftware.html?downloadType=distributions&product=red.hat.integration +:NameRedHatIntegrationDownloads: Red Hat Integration Downloads + +:LinkOLMDocs: https://docs.openshift.com/container-platform/4.6/operators/understanding/olm/olm-understanding-olm.html +:NameOLMDocs: Operator Lifecycle Manager + +:LinkOperatorHub: https://docs.openshift.com/container-platform/4.6/operators/understanding/olm-understanding-operatorhub.html +:NameOperatorHub: OperatorHub + // JBoss Fuse titles :LinkCXFDevGuide: https://access.redhat.com/documentation/en-us/red_hat_fuse/{fuse-version}/html-single/apache_cxf_development_guide/index :NameOfCXFDevGuide: Apache CXF Development Guide @@ -151,16 +160,16 @@ // Debezium titles -:LinkDebeziumInstallOpenShift: https://access.redhat.com/documentation/en-us/red_hat_integration/{version}/html-single/installing_change_data_capture_on_openshift/ +:LinkDebeziumInstallOpenShift: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-q3/html-single/installing_change_data_capture_on_openshift/ :NameDebeziumInstallOpenShift: Installing Debezium on OpenShift -:LinkDebeziumInstallRHEL: https://access.redhat.com/documentation/en-us/red_hat_integration/{version}/html-single/installing_change_data_capture_on_rhel/ +:LinkDebeziumInstallRHEL: 
https://access.redhat.com/documentation/en-us/red_hat_integration/2020-q3/html-single/installing_change_data_capture_on_rhel/ :NameDebeziumInstallRHEL: Installing Debezium on RHEL -:LinkDebeziumGettingStarted: https://access.redhat.com/documentation/en-us/red_hat_integration/{version}/html-single/getting_started_with_change_data_capture/index +:LinkDebeziumGettingStarted: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-q3/html-single/getting_started_with_change_data_capture/index :NameDebeziumGettingStarted: Getting Started with Debezium -:LinkDebeziumUserGuide: https://access.redhat.com/documentation/en-us/red_hat_integration/{version}/html-single/debezium_user_guide/index +:LinkDebeziumUserGuide: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-q3/html-single/debezium_user_guide/index :NameDebeziumUserGuide: Debezium User Guide // Debezium link attributes that are used upstream. Add attributes as needed. diff --git a/docs/modules/ROOT/partials/shared/attributes.adoc b/docs/modules/ROOT/partials/shared/attributes.adoc index 8ab4fc8f3d..ff112546f8 100644 --- a/docs/modules/ROOT/partials/shared/attributes.adoc +++ b/docs/modules/ROOT/partials/shared/attributes.adoc @@ -13,8 +13,6 @@ // * Downstream-only content agged with ifdef::rh-service-registry[]...endif::[] // Untagged content is common - - // upstream :apicurio-registry: :registry: Apicurio Registry @@ -24,17 +22,22 @@ // downstream //:rh-service-registry: //:registry: Service Registry -//:ServiceRegistryName: Apicurio Registry //:kafka-streams: AMQ Streams //:registry-version: 1.0 -:registry-ocp-version: 4.5 -:version: 2020-Q3 -:context: registry + +//:attachmentsdir: files //integration products :fuse-version: 7.7 :amq-version: 7.7 -:3scale-version: 2.8 +:3scale-version: 2.9 + +//common +:version: 2020-Q4 +:registry-ocp-version: 4.5 +:context: registry + + // Characters :copy: © @@ -50,17 +53,17 @@ include::attributes-links.adoc[] // Download URLs 
:download-url-registry-container-catalog: https://catalog.redhat.com/software/containers/search -:download-url-registry-maven: https://access.redhat.com/jbossnetwork/restricted/listSoftware.html?downloadType=distributions&product=red.hat.integration&version=2020-Q2 -:download-url-registry-source-code: https://access.redhat.com/jbossnetwork/restricted/listSoftware.html?downloadType=distributions&product=red.hat.integration&version=2020-Q2 -:download-url-registry-kafka-connect: https://access.redhat.com/jbossnetwork/restricted/listSoftware.html?downloadType=distributions&product=red.hat.integration&version=2020-Q2 -:download-url-registry-custom-resources: https://access.redhat.com/jbossnetwork/restricted/listSoftware.html?downloadType=distributions&product=red.hat.integration&version=2020-Q2 +:download-url-registry-distribution: https://access.redhat.com/jbossnetwork/restricted/listSoftware.html?downloadType=distributions&product=red.hat.integration&version={version} + // internal links -:registry-overview: xref:registry-overview[] -:registry-rules: xref:registry-rules[] -:registry-artifact-types: xref:registry-artifact-types[] -:registry-rule-types: xref:registry-rule-types[] -:registry-rule-maturity-matrix: xref:registry-rule-maturity-matrix[] +:registry-overview: xref:intro-to-the-registry[] +:registry-rules: xref:intro-to-registry-rules[] +:registry-artifact-types: xref:artifact-and-rule-types[] +:registry-rule-types: xref:artifact-and-rule-types[] :managing-registry-artifacts-ui: xref:managing-registry-artifacts-ui[] +:installing-the-registry-openshift: xref:installing-registry-ocp[] :installing-the-registry-docker: xref:installing-the-registry-docker[] -:installing-the-registry-openshift: xref:installing-the-registry[] +:registry-reference: xref:artifact-and-rule-types[] +:managing-registry-artifacts-api: xref:managing-registry-artifacts-api[] +:kafka-client-serdes: xref:using-kafka-client-serdes[] From 0436784dc24b1d6e9716210227a2b072f51fc5ad Mon Sep 17 
00:00:00 2001 From: Stephen McCarthy <29098561+smccarthy-ie@users.noreply.github.com> Date: Tue, 17 Nov 2020 20:46:37 +0000 Subject: [PATCH 19/31] more doc clean up, fomatting, modularization- no technical changes (#1016) --- .../assembly-installing-registry-docker.adoc | 1 - ...ssembly-installing-registry-openshift.adoc | 1 - .../assembly-intro-to-registry-rules.adoc | 2 +- .../assembly-intro-to-the-registry.adoc | 1 - ...embly-managing-registry-artifacts-api.adoc | 1 - ...bly-managing-registry-artifacts-maven.adoc | 1 - ...sembly-managing-registry-artifacts-ui.adoc | 1 - .../assembly-registry-reference.adoc | 1 - .../assembly-using-kafka-client-serdes.adoc | 8 +- .../con-registry-artifacts.adoc | 2 +- .../con-registry-serdes-avro.adoc | 90 ++++++++ .../con-registry-serdes-concepts.adoc | 6 +- .../con-registry-serdes-json.adoc | 42 ++++ .../con-registry-serdes-protobuf.adoc | 47 +++++ .../con-registry-serdes-strategy.adoc | 1 + .../con-registry-serdes-types.adoc | 195 ++---------------- ...roc-installing-postgresql-operatorhub.adoc | 9 - .../proc-registry-serdes-config-consumer.adoc | 2 +- .../proc-registry-serdes-config-producer.adoc | 2 +- .../proc-setting-up-infinispan-storage.adoc | 9 - .../proc-setting-up-postgresql-storage.adoc | 11 +- .../partials/shared/attributes-links.adoc | 8 +- .../ROOT/partials/shared/attributes.adoc | 28 ++- 23 files changed, 226 insertions(+), 243 deletions(-) create mode 100644 docs/modules/ROOT/partials/getting-started/con-registry-serdes-avro.adoc create mode 100644 docs/modules/ROOT/partials/getting-started/con-registry-serdes-json.adoc create mode 100644 docs/modules/ROOT/partials/getting-started/con-registry-serdes-protobuf.adoc diff --git a/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-docker.adoc b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-docker.adoc index c0eda1102f..f393a6d42e 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-docker.adoc +++ 
b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-docker.adoc @@ -1,5 +1,4 @@ // Metadata created by nebel - include::{mod-loc}shared/all-attributes.adoc[] [id="installing-the-registry-docker"] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc index 1bee9b27a6..1757e24e0a 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-openshift.adoc @@ -1,5 +1,4 @@ // Metadata created by nebel - include::{mod-loc}shared/all-attributes.adoc[] [id="installing-registry-ocp"] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc index 3a2701cc7a..2c97634461 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-registry-rules.adoc @@ -10,7 +10,7 @@ This chapter introduces the optional rules used to govern registry content and p * xref:registry-rules[] * xref:registry-rules-apply[] * xref:registry-rules-work[] -* xref:registry-rules-config[] +* xref:registry-rules-config[] //INCLUDES include::{mod-loc}getting-started/con-registry-rules.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc index 30c1230ee9..26261033a0 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-intro-to-the-registry.adoc @@ -1,5 +1,4 @@ // Metadata created by nebel - include::{mod-loc}shared/all-attributes.adoc[] [id="intro-to-the-registry"] diff --git 
a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc index cba301c3bb..8d630451b1 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-api.adoc @@ -1,5 +1,4 @@ // Metadata created by nebel - include::{mod-loc}shared/all-attributes.adoc[] [id="managing-registry-artifacts-api"] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc index b63a8461ec..ae03c18b4a 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-maven.adoc @@ -1,5 +1,4 @@ // Metadata created by nebel - include::{mod-loc}shared/all-attributes.adoc[] [id="managing-registry-artifacts-maven"] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-ui.adoc b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-ui.adoc index 33b6ca111a..a178e98923 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-ui.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-managing-registry-artifacts-ui.adoc @@ -1,5 +1,4 @@ // Metadata created by nebel - include::{mod-loc}shared/all-attributes.adoc[] [id="managing-registry-artifacts-ui"] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc b/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc index 8319eda9f2..ae52869c66 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc @@ -1,5 +1,4 @@ // Metadata 
created by nebel - include::{mod-loc}shared/all-attributes.adoc[] [id="artifact-and-rule-types"] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc b/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc index 991d78da82..b71386d19d 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-using-kafka-client-serdes.adoc @@ -1,5 +1,4 @@ // Metadata created by nebel -// include::{mod-loc}shared/all-attributes.adoc[] [id="using-kafka-client-serdes"] @@ -14,6 +13,9 @@ This chapter provides instructions on how to use the Kafka client serializers an * xref:registry-serdes-concepts-strategy-{context}[] * xref:registry-serdes-concepts-constants-{context}[] * xref:registry-serdes-types-serde-{context}[] +* xref:registry-serdes-types-avro-{context}[] +* xref:registry-serdes-types-json-{context}[] +* xref:registry-serdes-types-protobuf-{context}[] * xref:registry-serdes-register-{context}[] * xref:registry-serdes-config-consumer-{context}[] * xref:registry-serdes-config-producer-{context}[] @@ -34,12 +36,14 @@ include::{mod-loc}getting-started/con-registry-serdes-concepts.adoc[leveloffset= include::{mod-loc}getting-started/con-registry-serdes-strategy.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-registry-serdes-constants.adoc[leveloffset=+1] include::{mod-loc}getting-started/con-registry-serdes-types.adoc[leveloffset=+1] +include::{mod-loc}getting-started/con-registry-serdes-avro.adoc[leveloffset=+2] +include::{mod-loc}getting-started/con-registry-serdes-json.adoc[leveloffset=+2] +include::{mod-loc}getting-started/con-registry-serdes-protobuf.adoc[leveloffset=+2] include::{mod-loc}getting-started/proc-registry-serdes-register.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-registry-serdes-config-consumer.adoc[leveloffset=+1] 
include::{mod-loc}getting-started/proc-registry-serdes-config-producer.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-registry-serdes-config-stream.adoc[leveloffset=+1] - //.Additional resources (or Next steps) //* ... diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc index 3d7d0a4e32..9667224634 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-artifacts.adoc @@ -1,7 +1,7 @@ // Metadata created by nebel [id="registry-artifacts"] -= Schema and API artifacts in {registry} += Schema and API artifacts in {registry} The items stored in {registry}, such as event schemas and API specifications, are known as registry _artifacts_. The following shows an example of an Apache Avro schema artifact in JSON format for a simple share price application: diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-avro.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-avro.adoc new file mode 100644 index 0000000000..1f07782945 --- /dev/null +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-avro.adoc @@ -0,0 +1,90 @@ +// Module included in the following assemblies: +// assembly-using-kafka-client-serdes + +[id='registry-serdes-types-avro-{context}'] += Configure Avro SerDe with {registry} + +{registry} provides Kafka client serializer and deserializer classes for Apache Avro to make using Avro as +easy as possible: + +* `io.apicurio.registry.utils.serde.AvroKafkaSerializer` +* `io.apicurio.registry.utils.serde.AvroKafkaDeserializer` + + +.Configure the Avro serializer + +You can configure the Avro serializer class in the following ways: + +* {registry} location as a URL +* Artifact ID strategy +* Global ID strategy +* Global ID location +* Global ID handler +* Avro datum provider +* Avro encoding + +.Global ID 
location +The serializer passes the unique global ID of the schema as part of the Kafka message so that consumers can use the right schema for deserialization. The location of that global ID can be in the payload of the message or in the message headers. The default approach is to pass the global ID in the message payload. If you want the ID sent in the message headers instead, you can set the following configuration property: +---- +props.putIfAbsent(AbstractKafkaSerDe.USE_HEADERS, "true") +---- +The property name is `apicurio.registry.use.headers`. + + +.Global ID handler +You can customize precisely how the global ID is encoded when passing it in the Kafka message body. Set +the configuration property `apicurio.registry.id-handler` to be a class that implements the +`io.apicurio.registry.utils.serde.strategy.IdHandler` interface. {registry} provides two implementations of +that interface: + +* `io.apicurio.registry.utils.serde.strategy.DefaultIdHandler` - stores the ID as an 8 byte long +* `io.apicurio.registry.utils.serde.strategy.Legacy4ByteIdHandler` - stores the ID as a 4 byte int + +{registry} represents the global ID of an artifact as a long, but for legacy reasons (or for compatibility with other registries or serde classes) you may want to use 4 bytes when sending the ID. + +.Avro datum provider +Avro provides different datum writers and readers to write and read data. {registry} supports three different types: + +* Generic +* Specific +* Reflect + +The {registry} `AvroDatumProvider` is the abstraction on which type is then actually used, where `DefaultAvroDatumProvider` is used by default. 
+ +There are two configuration options you can set: + +* `apicurio.registry.avro-datum-provider` - provide a fully qualified Java class name of the `AvroDatumProvider` implementation, for example `io.apicurio.registry.utils.serde.avro.ReflectAvroDatumProvider` +* `apicurio.registry.use-specific-avro-reader` - true or false, to use specific type when using `DefaultAvroDatumProvider` + +.Avro encoding + +When using Apache Avro to serialize data, it is common to use the Avro binary encoding format. This is so that the data is encoded in as efficient a format as possible. However, Avro also supports encoding the data as JSON. Encoding as JSON is useful because it is much easier to inspect the payload of each message, often for logging, debugging, or other similar use cases. The {registry} Avro serializer can be configured to change the encoding to JSON from the default (binary). + +Set the Avro encoding to use by configuring the `apicurio.avro.encoding` property. The value must be either +`JSON` or `BINARY`. + +.Configure the Avro deserializer + +You must configure the Avro deserializer class to match the configuration settings of the serializer. As a +result, you can configure the Avro deserializer class in the following ways: + +* {registry} location as a URL +* Global ID handler +* Avro datum provider +* Avro encoding + +See the serializer section for these configuration options - the property names and values are the same. + +[NOTE] +==== +The following options are not needed when configuring the deserializer: + +* Artifact ID strategy +* Global ID strategy +* Global ID location +==== + +The reason these options are not necessary is that the deserializer class can figure this information out from +the message itself. In the case of the two strategies, they are not needed because the serializer is responsible for sending the global ID of the schema as part of the message. 
+ +The location of that global ID is determined by the deserializer by simply checking for the magic byte at the start of the message payload. If that byte is found, the global ID is read from the message payload using the configured handler. If the magic byte is not found, the global ID is read from the message headers. diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc index 46c6b93b9a..0bd423d29b 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-concepts.adoc @@ -17,7 +17,8 @@ Schemas can evolve, so you can define rules in {registry}, for example, to ensur These schema technologies can be used by client applications through Kafka client serializer/deserializer (SerDe) services provided by {registry}. The maturity and usage of the SerDe classes provided by {registry} may vary. See the type-specific sections below for more details about each. -= Producer schema configuration +[discrete] +== Producer schema configuration A producer client application uses a serializer to put the messages that it sends to a specific broker topic into the correct data format. @@ -35,7 +36,8 @@ After registering your schema, when you start Kafka and {registry}, you can acce If a schema already exists, you can create a new version using the REST API based on compatibility rules defined in {registry}. Versions are used for compatibility checking as a schema evolves. An artifact ID and schema version represents a unique tuple that identifies a schema. -= Consumer schema configuration +[discrete] +== Consumer schema configuration A consumer client application uses a deserializer to get the messages that it consumes from a specific broker topic into the correct data format. 
To enable a consumer to use {registry} for deserialization: diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-json.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-json.adoc new file mode 100644 index 0000000000..35c2129f84 --- /dev/null +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-json.adoc @@ -0,0 +1,42 @@ +// Module included in the following assemblies: +// assembly-using-kafka-client-serdes + +[id='registry-serdes-types-json-{context}'] += Configure JSON Schema SerDe with {registry} + +{registry} provides Kafka client serializer and deserializer classes for JSON Schema to make using JSON Schema as easy as possible: + +* `io.apicurio.registry.utils.serde.JsonSchemaKafkaSerializer` +* `io.apicurio.registry.utils.serde.JsonSchemaKafkaDeserializer` + +Unlike Apache Avro, JSON Schema is not actually a serialization technology - it is instead a validation +technology. As a result, configuration options for JSON Schema are quite different. For example, there is no +encoding option, because data is always encoded as JSON. + +.Configure the JSON Schema serializer + +You can configure the JSON Schema serializer class in the following ways: + +* {registry} location as a URL +* Artifact ID strategy +* Global ID strategy +* Validation enabled/disabled + +The only non-standard configuration property is whether JSON Schema validation is enabled or +disabled. The validation feature is disabled by default but can be enabled by setting +`apicurio.registry.serdes.json-schema.validation-enabled` to `"true"`. For example: +---- +props.putIfAbsent(JsonSchemaSerDeConstants.REGISTRY_JSON_SCHEMA_VALIDATION_ENABLED, "true") +---- + +.Configure the JSON Schema deserializer + +You can configure the JSON Schema deserializer class in the following ways: + +* {registry} location as a URL +* Validation enabled/disabled + +The deserializer is simple to configure. 
You must provide the location of {registry} so that the schema can be loaded. The only other configuration is whether or not to perform validation. These +configuration properties are the same as for the serializer. + +NOTE: Deserializer validation only works if the serializer passes the global ID in the Kafka message, which will only happen when validation is enabled in the serializer. diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-protobuf.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-protobuf.adoc new file mode 100644 index 0000000000..22cfbb04ac --- /dev/null +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-protobuf.adoc @@ -0,0 +1,47 @@ +// Module included in the following assemblies: +// assembly-using-kafka-client-serdes + +[id='registry-serdes-types-protobuf-{context}'] + += Configure Protobuf SerDe with {registry} + +{registry} provides Kafka client serializer and deserializer classes for Google Protobuf to make using Protobuf as easy as possible: + +* `io.apicurio.registry.utils.serde.ProtobufKafkaSerializer` +* `io.apicurio.registry.utils.serde.ProtobufKafkaDeserializer` + +.Configure the Protobuf serializer + +You can configure the Protobuf serializer class in the following ways: + +* {registry} location as a URL +* Artifact ID strategy +* Global ID strategy +* Global ID location +* Global ID handler + +.Configure the Protobuf deserializer + +You must configure the Protobuf deserializer class to match the configuration settings of the serializer. As a result, you can configure the Protobuf deserializer class in the following ways: + +* {registry} location as a URL +* Global ID handler + +See the serializer section for these configuration options - the property names and values are the same. 
+ +[NOTE] +==== +The following options are not needed when configuring the deserializer: + +* Artifact ID strategy +* Global ID strategy +* Global ID location +==== + +The reason these options are not necessary is that the deserializer class can figure this information out from +the message itself. In the case of the two strategies, they are not needed because the serializer is responsible for sending the global ID of the schema as part of the message. + +The location of that global ID is determined (by the deserializer) by simply checking for the magic byte at the start of the message payload. If that byte is found, the global ID is read from the message payload (using the configured handler). If the magic byte is not found, the global ID is read from the message headers. + +NOTE: The Protobuf deserializer does not deserialize to your exact Protobuf Message implementation, +but rather to a `DynamicMessage` instance (because there is no appropriate API to do otherwise). diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc index 50fcb3b072..32352d5d77 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc @@ -29,6 +29,7 @@ Strategies to return an artifact ID based on an implementation of `ArtifactIdStr `TopicIdStrategy`:: (Default) strategy that uses the topic name and `key` or `value` suffix. `SimpleTopicIdStrategy`:: Simple strategy that only uses the topic name. 
+ [discrete] == Global ID strategy diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc index 2b21536313..fe150f6f48 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-types.adoc @@ -2,7 +2,7 @@ // assembly-using-kafka-client-serdes [id='registry-serdes-types-serde-{context}'] -= Configuring different SerDe types += Using different client serializer/deserializer types When using a schema technology in your Kafka applications, you must choose which specific schema type to use. Common options include: @@ -12,13 +12,13 @@ When using a schema technology in your Kafka applications, you must choose which Which schema technology you choose is dependent on use case and preference. Of course you can use Kafka to implement custom serializer and deserializer classes, so you are always free to write your own classes, including leveraging {registry} functionality using the {registry} REST Java client. -For your convenience, {registry} provides out-of-the box SerDe classes for all three schema technologies. This section explains how to configure Kafka applications to use each type. - -Using one of the serializer or deserializer classes provided by {registry} in your Kafka application involves setting the correct configuration properties. Here are some simple examples of configuring producer and consumer Kafka applications. +For your convenience, {registry} provides out-of-the-box SerDe classes for Avro, JSON Schema, and Protobuf schema technologies. The following sections explain how to configure Kafka applications to use each type. 
[discrete] -== Configuring a producer +== Kafka application configuration for serializers/deserializers +Using one of the serializer or deserializer classes provided by {registry} in your Kafka application involves setting the correct configuration properties. The following simple examples show how to configure a serializer in a Kafka producer application and how to configure a deserializer in a Kafka consumer application. +.Example serializer configuration in a Kafka producer [source,java,subs="+quotes,attributes"] ---- public Producer createKafkaProducer(String kafkaBootstrapServers, String topicName) { @@ -29,7 +29,7 @@ public Producer createKafkaProducer(String kafkaBootstrapServers, props.putIfAbsent(ProducerConfig.CLIENT_ID_CONFIG, "Producer-" + topicName); props.putIfAbsent(ProducerConfig.ACKS_CONFIG, "all"); - // Use a {registry} provided Kafka Serializer + // Use a {registry}-provided Kafka serializer props.putIfAbsent(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, io.apicurio.registry.utils.serde.AvroKafkaSerializer.class.getName()); props.putIfAbsent(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, @@ -37,9 +37,11 @@ public Producer createKafkaProducer(String kafkaBootstrapServers, // Configure {registry} location props.putIfAbsent(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, REGISTRY_URL); + // Map the topic name (plus -key/value) to the artifactId in the registry props.putIfAbsent(AbstractKafkaSerializer.REGISTRY_ARTIFACT_ID_STRATEGY_CONFIG_PARAM, io.apicurio.registry.utils.serde.strategy.TopicIdStrategy.class.getName()); + // Get an existing schema or auto-register if not found props.putIfAbsent(AbstractKafkaSerializer.REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM, io.apicurio.registry.utils.serde.strategy.GetOrCreateIdStrategy.class.getName()); @@ -50,9 +52,7 @@ public Producer createKafkaProducer(String kafkaBootstrapServers, } ---- -[discrete] -== Configuring a consumer - +.Example deserializer configuration in a Kafka consumer 
[source,java,subs="+quotes,attributes"] ---- public Consumer createKafkaConsumer(String kafkaBootstrapServers, String topicName) { @@ -65,7 +65,7 @@ public Consumer createKafkaConsumer(String kafkaBootstrapServers, props.putIfAbsent(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000"); props.putIfAbsent(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); - // Use a {registry} provided Kafka Deserializer + // Use a {registry}-provided Kafka deserializer props.putIfAbsent(ProducerConfig.KEY_DESERIALIZER_CLASS_CONFIG, io.apicurio.registry.utils.serde.AvroKafkaDeserializer.class.getName()); props.putIfAbsent(ProducerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, @@ -73,180 +73,13 @@ public Consumer createKafkaConsumer(String kafkaBootstrapServers, // Configure {registry} location props.putIfAbsent(AbstractKafkaSerDe.REGISTRY_URL_CONFIG_PARAM, REGISTRY_URL); - // No other configuration needed for the deserializer, because the globalId of the schema - // the deserializer should use is sent as part of the message. So the deserializer simply + + // No other configuration needed for deserializer because globalId of the schema + // the deserializer uses is sent as part of the message. The deserializer simply // extracts that globalId and uses it to look up the schema from the registry. - // Create the Kafka Consumer + // Create the Kafka consumer KafkaConsumer consumer = new KafkaConsumer<>(props); return consumer; } ---- - -== Using Avro SerDe with {registry} - -{registry} provides serializer and deserializer classes for Apache Avro to make using Avro as -easy as possible. 
These classes are: - -* `io.apicurio.registry.utils.serde.AvroKafkaSerializer` -* `io.apicurio.registry.utils.serde.AvroKafkaDeserializer` - -=== Configuring the Avro serializer - -You can configure the Avro serializer class in the following ways: - -* {registry} location as a URL -* Artifact ID strategy -* Global ID strategy -* Global ID location -* Global ID handler -* Avro datum provider -* Avro encoding - -.Global ID location -The serializer passes the unique global ID of the schema as part of the Kafka message so that consumers can use the right schema for deserialization. The location of that global ID can be in the payload of the message or in the message headers. The default approach is to pass the global ID in the message payload. If you want the ID sent in the message headers instead, you can set the following configuration property: ----- -props.putIfAbsent(AbstractKafkaSerDe.USE_HEADERS, "true") ----- -The property name is `apicurio.registry.use.headers`. - - -.Global ID handler -You can customize precisely how the global ID is encoded when passing it in the Kafka message body. Set -the configuration property `apicurio.registry.id-handler` to be a class that implements the -`io.apicurio.registry.utils.serde.strategy.IdHandler` interface. {registry} provides two implementations of -that interface: - -* `io.apicurio.registry.utils.serde.strategy.DefaultIdHandler` - stores the ID as an 8 byte long -* `io.apicurio.registry.utils.serde.strategy.Legacy4ByteIdHandler` - stores the ID as an 4 byte int - -{registry} represents the global ID of an artifact as a long, but for legacy reasons (or for compatibility with other registries or serde classes) you may want to use 4 bytes when sending the ID. - -.Avro datum provider -Avro provides different datum writers and readers to write and read data. 
{registry} supports three different types: - -* Generic -* Specific -* Reflect - -The {registry} `AvroDatumProvider` is the abstraction on which type is then actually used, where `DefaultAvroDatumProvider` is used by default. - -There are two configuration options you can set: - -* `apicurio.registry.avro-datum-provider` - provide a fully qualified Java class name of the `AvroDatumProvider` implementation, for example `io.apicurio.registry.utils.serde.avro.ReflectAvroDatumProvider` -* `apicurio.registry.use-specific-avro-reader` - true or false, to use specific type when using `DefaultAvroDatumProvider` - -.Avro encoding - -When using Apache Avro to serializer data, it is common to use the Avro binary encoding format. This is so that the data is encoded in as efficient a format as possible. However, Avro also supports encoding the data as JSON. Encoding as JSON is useful because it is much easier to inspect the payload of each message, often for logging, debugging, or other similar use cases. The {registry} Avro serializer can be configured to change the encoding to JSON from the default (binary). - -Set the Avro encoding to use by configuring the `apicurio.avro.encoding` property. The value must be either -`JSON` or `BINARY`. - -=== Configuring the Avro deserializer - -You must configure the Avro deserializer class to match the configuration settings of the serializer. As a -result, you can configure the Avro deserializer class in the following ways: - -* {registry} location as a URL -* Global ID handler -* Avro datum provider -* Avro encoding - -See the serializer documentation for the above configuration options - the property names and values are the same. - -[NOTE] -==== -The following options are not needed when configuring the deserializer: - -* Artifact ID strategy -* Global ID strategy -* Global ID location -==== - -The reason these options are not necessary is that the deserializer class can figure this information out from -the message itself. 
In the case of the two strategies, they are not needed because the serializer is responsible for sending the global ID of the schema as part of the message. - -The location of that global ID is determined by the deserializer by simply checking for the magic byte at the start of the message payload. If that byte is found, the global ID is read from the message payload using the configured handler. If the magic byte is not found, the global ID is read from the message headers. - -== Using JSON Schema SerDe with {registry} - -{registry} provides serializer and deserializer classes for JSON Schema to make using JSON Schema as easy as possible. These classes are: - -* `io.apicurio.registry.utils.serde.JsonSchemaKafkaSerializer` -* `io.apicurio.registry.utils.serde.JsonSchemaKafkaDeserializer` - -Unlike Apache Avro, JSON Schema is not actually a serialization technology - it is instead a validation -technology. As a result, configuration options for JSON Schema are quite different. For example, there is no -encoding option, because data is always encoded as JSON. - -=== Configuring the JSON Schema serializer - -You can configure the JSON Schema serializer class in the following ways: - -* {registry} location as a URL -* Artifact ID strategy -* Global ID strategy -* Validation enabled/disabled - -As you can see, the only non-standard configuration property is whether JSON Schema validation is enabled or -disabled. The validation feature is disabled by default but can be enabled by setting -`apicurio.registry.serdes.json-schema.validation-enabled` to `"true"`. For example: ----- -props.putIfAbsent(JsonSchemaSerDeConstants.REGISTRY_JSON_SCHEMA_VALIDATION_ENABLED, "true")` ----- - -=== Configuring the JSON Schema deserializer - -You can configure the JSON Schema deserializer class in the following ways: - -* {registry} location as a URL -* Validation enabled/disabled - -The deserializer is simple to configure. 
You must provide the location of {registry} so that the schema can be loaded. The only other configuration is whether or not to perform validation. These -configuration properties are the same as for the serializer. - -NOTE: Deserializer validation only works if the serializer passes the global ID in the Kafka message, which will only happen when validation is enabled in the serializer. - -== Using Protobuf SerDe with {registry} - -{registry} provides serializer and deserializer classes for Google Protobuf out of the box, to make using Protobuf as easy as possible. These classes are: - -* `io.apicurio.registry.utils.serde.ProtobufKafkaSerializer` -* `io.apicurio.registry.utils.serde.ProtobufKafkaDeserializer` - -=== Configuring the Protobuf serializer - -You can configure the Protobuf serializer class in the following ways: - -* {registry} location as a URL -* Artifact ID strategy -* Global ID strategy -* Global ID location -* Global ID handler - -=== Configuring the Protobuf deserializer - -You must configure the Protobuf deserializer class to match the configuration settings of the serializer. As a result, you can configure the Protobuf deserializer class in the following ways: - -* {registry} location as a URL -* Global ID handler - -See the serializer documentation these configuration options - the property names and values are the same. - -[NOTE] -==== -The following options are not needed when configuring the deserializer: - -* Artifact ID strategy -* Global ID strategy -* Global ID location -==== - -The reason these options are not necessary is that the deserializer class can figure this information out from -the message itself. In the case of the two strategies, they are not needed because the serializer is responsible for sending the global ID of the schema as part of the message. - -The location of that global ID is determined (by the deserializer) by simply checking for the magic byte at the start of the message payload. 
If that byte is found, the global ID is read from the message payload (using the configured handler). If the magic byte is not found, the global ID is read from the message headers. - -NOTE: The Protobuf deserializer does not deserialize to your exact Protobuf Message implementation, -but rather to a `DynamicMessage` instance (because there is no appropriate API to do otherwise). diff --git a/docs/modules/ROOT/partials/getting-started/proc-installing-postgresql-operatorhub.adoc b/docs/modules/ROOT/partials/getting-started/proc-installing-postgresql-operatorhub.adoc index 94c5506f65..e4e5b820bd 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-installing-postgresql-operatorhub.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-installing-postgresql-operatorhub.adoc @@ -8,15 +8,6 @@ If you do not already have a PostgreSQL database Operator installed, you can install a PostgreSQL Operator on your OpenShift cluster from the OperatorHub. The OperatorHub is available from the OpenShift Container Platform web console and provides an interface for cluster administrators to discover and install Operators. For more details, see the https://docs.openshift.com/container-platform/{registry-ocp-version}/operators/olm-understanding-operatorhub.html[OpenShift documentation]. -ifdef::rh-service-registry[] -[IMPORTANT] -==== -{registry} storage in a PostgreSQL database is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. - -These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. For more information about the support scope of Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview. 
-==== -endif::[] - .Prerequisites * You must have cluster administrator access to an OpenShift cluster. diff --git a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc index 0ae08a6257..71b8e87554 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-consumer.adoc @@ -2,7 +2,7 @@ // assembly-using-kafka-client-serdes [id='registry-serdes-config-consumer-{context}'] -= Using a schema from a consumer client += Using a schema from a Kafka consumer client This procedure describes how to configure a Kafka consumer client written in Java to use a schema from {registry}. diff --git a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc index bfcdf27345..7b35e40598 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-registry-serdes-config-producer.adoc @@ -2,7 +2,7 @@ // assembly-using-kafka-client-serdes [id='registry-serdes-config-producer-{context}'] -= Using a schema from a producer client += Using a schema from a Kafka producer client This procedure describes how to configure a Kafka producer client written in Java to use a schema from {registry}. 
diff --git a/docs/modules/ROOT/partials/getting-started/proc-setting-up-infinispan-storage.adoc b/docs/modules/ROOT/partials/getting-started/proc-setting-up-infinispan-storage.adoc index d6d3cc0c3f..d378794aab 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-setting-up-infinispan-storage.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-setting-up-infinispan-storage.adoc @@ -8,15 +8,6 @@ This section explains how to configure Infinispan cache-based storage for {registry} on OpenShift. This storage option is based on Infinispan community Java libraries embedded in the Quarkus-based {registry} server. You do not need to install a separate Infinispan server using this storage option. This option is suitable for development or demonstration only, and is not suitable for production environments. -ifdef::rh-service-registry[] -[IMPORTANT] -==== -{registry} storage in Infinispan is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. - -These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. For more information about the support scope of Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview. -==== -endif::[] - .Prerequisites * You must have an OpenShift cluster with cluster administrator access. * You must have already installed {registry}. See xref:installing-registry-operatorhub[]. 
diff --git a/docs/modules/ROOT/partials/getting-started/proc-setting-up-postgresql-storage.adoc b/docs/modules/ROOT/partials/getting-started/proc-setting-up-postgresql-storage.adoc index 9d2db85884..d29d3390fb 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-setting-up-postgresql-storage.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-setting-up-postgresql-storage.adoc @@ -8,19 +8,10 @@ This section explains how to configure Java Persistence API-based storage for {registry} on OpenShift using a PostgreSQL database Operator. You can install {registry} in an existing database or create a new database, depending on your environment. This section shows a simple example using the PostgreSQL Operator by Dev4Ddevs.com. -ifdef::rh-service-registry[] -[IMPORTANT] -==== -{registry} storage in a PostgreSQL database is a Technology Preview feature only. Technology Preview features are not supported with Red Hat production service level agreements (SLAs) and might not be functionally complete. Red Hat does not recommend using them in production. - -These features provide early access to upcoming product features, enabling customers to test functionality and provide feedback during the development process. For more information about the support scope of Red Hat Technology Preview features, see https://access.redhat.com/support/offerings/techpreview. -==== -endif::[] - .Prerequisites * You must have an OpenShift cluster with cluster administrator access. * You must have already installed {registry}. See xref:installing-registry-operatorhub[]. -* You must have already installed a PostgreSQL Operator on OpenShift. For examaple, see xref:installing-postgresql-operatorhub[]. +* You must have already installed a PostgreSQL Operator on OpenShift. For example, see xref:installing-postgresql-operatorhub[]. 
.Procedure diff --git a/docs/modules/ROOT/partials/shared/attributes-links.adoc b/docs/modules/ROOT/partials/shared/attributes-links.adoc index c1e4851362..ce3fc01355 100644 --- a/docs/modules/ROOT/partials/shared/attributes-links.adoc +++ b/docs/modules/ROOT/partials/shared/attributes-links.adoc @@ -160,16 +160,16 @@ // Debezium titles -:LinkDebeziumInstallOpenShift: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-q3/html-single/installing_change_data_capture_on_openshift/ +:LinkDebeziumInstallOpenShift: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-Q3/html-single/installing_change_data_capture_on_openshift/ :NameDebeziumInstallOpenShift: Installing Debezium on OpenShift -:LinkDebeziumInstallRHEL: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-q3/html-single/installing_change_data_capture_on_rhel/ +:LinkDebeziumInstallRHEL: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-Q3/html-single/installing_change_data_capture_on_rhel/ :NameDebeziumInstallRHEL: Installing Debezium on RHEL -:LinkDebeziumGettingStarted: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-q3/html-single/getting_started_with_change_data_capture/index +:LinkDebeziumGettingStarted: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-Q3/html-single/getting_started_with_change_data_capture/index :NameDebeziumGettingStarted: Getting Started with Debezium -:LinkDebeziumUserGuide: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-q3/html-single/debezium_user_guide/index +:LinkDebeziumUserGuide: https://access.redhat.com/documentation/en-us/red_hat_integration/2020-Q3/html-single/debezium_user_guide/index :NameDebeziumUserGuide: Debezium User Guide // Debezium link attributes that are used upstream. Add attributes as needed. 
diff --git a/docs/modules/ROOT/partials/shared/attributes.adoc b/docs/modules/ROOT/partials/shared/attributes.adoc index ff112546f8..f340f3ec41 100644 --- a/docs/modules/ROOT/partials/shared/attributes.adoc +++ b/docs/modules/ROOT/partials/shared/attributes.adoc @@ -14,18 +14,19 @@ // Untagged content is common // upstream -:apicurio-registry: -:registry: Apicurio Registry -:kafka-streams: Strimzi -:registry-version: 1.3 +//:apicurio-registry: +//:registry: Apicurio Registry +//:kafka-streams: Strimzi +//:registry-version: 1.3 // downstream -//:rh-service-registry: -//:registry: Service Registry -//:kafka-streams: AMQ Streams -//:registry-version: 1.0 - -//:attachmentsdir: files +:rh-service-registry: +:registry: Service Registry +:kafka-streams: AMQ Streams +:registry-version: 1.1 +:registry-ocp-version: 4.5 +:version: 2020-Q4 +:attachmentsdir: files //integration products :fuse-version: 7.7 @@ -33,12 +34,9 @@ :3scale-version: 2.9 //common -:version: 2020-Q4 :registry-ocp-version: 4.5 :context: registry - - // Characters :copy: © :infin: ∞ @@ -48,7 +46,7 @@ :reg: ® :trade: ™ -//Include attributes for deep linking +//Include attributes for external linking include::attributes-links.adoc[] // Download URLs @@ -63,7 +61,7 @@ include::attributes-links.adoc[] :registry-rule-types: xref:artifact-and-rule-types[] :managing-registry-artifacts-ui: xref:managing-registry-artifacts-ui[] :installing-the-registry-openshift: xref:installing-registry-ocp[] -:installing-the-registry-docker: xref:installing-the-registry-docker[] +:installing-the-registry-storage-openshift: xref:installing-registry-streams-storage[] :registry-reference: xref:artifact-and-rule-types[] :managing-registry-artifacts-api: xref:managing-registry-artifacts-api[] :kafka-client-serdes: xref:using-kafka-client-serdes[] From 2ecedebd03184df2a529330dd86963d8e972c7d9 Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Thu, 19 Nov 2020 11:02:40 -0500 Subject: [PATCH 20/31] Automated update to Release Version:: 
1.3.2.Final --- app/pom.xml | 2 +- client/pom.xml | 2 +- common/pom.xml | 2 +- distro/connect-converter/pom.xml | 2 +- distro/docker-compose/pom.xml | 2 +- distro/docker/pom.xml | 2 +- distro/openshift-template/pom.xml | 2 +- distro/pom.xml | 2 +- docs/pom.xml | 2 +- docs/rest-api/pom.xml | 2 +- pom.xml | 2 +- rest-client/pom.xml | 2 +- search/client/pom.xml | 2 +- search/connector/pom.xml | 2 +- storage/asyncmem/pom.xml | 2 +- storage/infinispan/pom.xml | 2 +- storage/jpa/pom.xml | 2 +- storage/kafka/pom.xml | 2 +- storage/streams/pom.xml | 2 +- tests/pom.xml | 2 +- ui/pom.xml | 2 +- utils/converter/pom.xml | 2 +- utils/kafka/pom.xml | 2 +- utils/maven-plugin/pom.xml | 2 +- utils/serde/pom.xml | 2 +- utils/streams/pom.xml | 2 +- utils/tests/pom.xml | 2 +- utils/tools/pom.xml | 2 +- 28 files changed, 28 insertions(+), 28 deletions(-) diff --git a/app/pom.xml b/app/pom.xml index 75cfe475f1..30039e1dd8 100644 --- a/app/pom.xml +++ b/app/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/client/pom.xml b/client/pom.xml index 1c89abeb20..b47903372b 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/common/pom.xml b/common/pom.xml index 6ba6d5cdb3..10f2e42639 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/distro/connect-converter/pom.xml b/distro/connect-converter/pom.xml index 48ae1f2693..2d7e49dcd0 100644 --- a/distro/connect-converter/pom.xml +++ b/distro/connect-converter/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry-distro - 1.3.3-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/distro/docker-compose/pom.xml b/distro/docker-compose/pom.xml index 82d4352444..d3cb671b92 100644 --- a/distro/docker-compose/pom.xml +++ b/distro/docker-compose/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry-distro - 
1.3.3-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/distro/docker/pom.xml b/distro/docker/pom.xml index a8e08df117..1dbd0c1325 100644 --- a/distro/docker/pom.xml +++ b/distro/docker/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry-distro - 1.3.3-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/distro/openshift-template/pom.xml b/distro/openshift-template/pom.xml index b5c9afba9b..87c9668de0 100644 --- a/distro/openshift-template/pom.xml +++ b/distro/openshift-template/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry-distro - 1.3.3-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/distro/pom.xml b/distro/pom.xml index 8000d2521a..7e638df2aa 100644 --- a/distro/pom.xml +++ b/distro/pom.xml @@ -4,7 +4,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../pom.xml apicurio-registry-distro diff --git a/docs/pom.xml b/docs/pom.xml index f3360f08fe..6d9d377674 100644 --- a/docs/pom.xml +++ b/docs/pom.xml @@ -8,7 +8,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/docs/rest-api/pom.xml b/docs/rest-api/pom.xml index 9bb6e266ff..9210fe96d8 100644 --- a/docs/rest-api/pom.xml +++ b/docs/rest-api/pom.xml @@ -8,7 +8,7 @@ io.apicurio apicurio-registry-docs - 1.3.3-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/pom.xml b/pom.xml index 675138635d..b195cc3a97 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final pom apicurio-registry diff --git a/rest-client/pom.xml b/rest-client/pom.xml index c62bb51d46..ce5ccf2eff 100644 --- a/rest-client/pom.xml +++ b/rest-client/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../pom.xml 4.0.0 diff --git a/search/client/pom.xml b/search/client/pom.xml index 18d22ae03d..4262ecc6a1 100644 --- a/search/client/pom.xml +++ b/search/client/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/search/connector/pom.xml 
b/search/connector/pom.xml index 005beaf930..a99e410ddc 100644 --- a/search/connector/pom.xml +++ b/search/connector/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/storage/asyncmem/pom.xml b/storage/asyncmem/pom.xml index fadddb946e..aeb5fd7e05 100644 --- a/storage/asyncmem/pom.xml +++ b/storage/asyncmem/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/storage/infinispan/pom.xml b/storage/infinispan/pom.xml index 103c85c1f7..2b7c8613d7 100644 --- a/storage/infinispan/pom.xml +++ b/storage/infinispan/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/storage/jpa/pom.xml b/storage/jpa/pom.xml index 9922795708..5e49008c2c 100644 --- a/storage/jpa/pom.xml +++ b/storage/jpa/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/storage/kafka/pom.xml b/storage/kafka/pom.xml index d160f663d0..4e43948d75 100644 --- a/storage/kafka/pom.xml +++ b/storage/kafka/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/storage/streams/pom.xml b/storage/streams/pom.xml index cc328304c6..f1e0a4461c 100644 --- a/storage/streams/pom.xml +++ b/storage/streams/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/tests/pom.xml b/tests/pom.xml index 25bc5fff0f..7b5e003054 100644 --- a/tests/pom.xml +++ b/tests/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../pom.xml apicurio-registry-tests diff --git a/ui/pom.xml b/ui/pom.xml index 40e21755b1..e2950c0f6d 100644 --- a/ui/pom.xml +++ b/ui/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../pom.xml diff --git a/utils/converter/pom.xml b/utils/converter/pom.xml index d020c316dc..7c289dd8f4 100644 --- 
a/utils/converter/pom.xml +++ b/utils/converter/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/utils/kafka/pom.xml b/utils/kafka/pom.xml index 3e99b3ae9a..5cc2c2de8f 100644 --- a/utils/kafka/pom.xml +++ b/utils/kafka/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/utils/maven-plugin/pom.xml b/utils/maven-plugin/pom.xml index 9081f2ba4f..e64c310978 100644 --- a/utils/maven-plugin/pom.xml +++ b/utils/maven-plugin/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/utils/serde/pom.xml b/utils/serde/pom.xml index 27967bef7f..30bd216733 100644 --- a/utils/serde/pom.xml +++ b/utils/serde/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/utils/streams/pom.xml b/utils/streams/pom.xml index 76364ac481..47bc6f684f 100644 --- a/utils/streams/pom.xml +++ b/utils/streams/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/utils/tests/pom.xml b/utils/tests/pom.xml index 80c9b05534..6d5528025d 100644 --- a/utils/tests/pom.xml +++ b/utils/tests/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../../pom.xml diff --git a/utils/tools/pom.xml b/utils/tools/pom.xml index 1c1c326257..0d02bfec36 100644 --- a/utils/tools/pom.xml +++ b/utils/tools/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.3-SNAPSHOT + 1.3.2.Final ../../pom.xml From 68da2af0941b88b8a7c7223aaf6cd7f575a23242 Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Thu, 19 Nov 2020 12:04:51 -0500 Subject: [PATCH 21/31] Automated update to next Snapshot Version --- app/pom.xml | 2 +- client/pom.xml | 2 +- common/pom.xml | 2 +- distro/connect-converter/pom.xml | 2 +- distro/docker-compose/pom.xml | 2 +- distro/docker/pom.xml | 2 +- distro/openshift-template/pom.xml | 2 +- 
distro/pom.xml | 2 +- docs/antora.yml | 2 +- docs/pom.xml | 2 +- docs/rest-api/pom.xml | 2 +- pom.xml | 2 +- rest-client/pom.xml | 2 +- search/client/pom.xml | 2 +- search/connector/pom.xml | 2 +- storage/asyncmem/pom.xml | 2 +- storage/infinispan/pom.xml | 2 +- storage/jpa/pom.xml | 2 +- storage/kafka/pom.xml | 2 +- storage/streams/pom.xml | 2 +- tests/pom.xml | 2 +- ui/pom.xml | 2 +- utils/converter/pom.xml | 2 +- utils/kafka/pom.xml | 2 +- utils/maven-plugin/pom.xml | 2 +- utils/serde/pom.xml | 2 +- utils/streams/pom.xml | 2 +- utils/tests/pom.xml | 2 +- utils/tools/pom.xml | 2 +- 29 files changed, 29 insertions(+), 29 deletions(-) diff --git a/app/pom.xml b/app/pom.xml index 30039e1dd8..75cfe475f1 100644 --- a/app/pom.xml +++ b/app/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/client/pom.xml b/client/pom.xml index b47903372b..1c89abeb20 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/common/pom.xml b/common/pom.xml index 10f2e42639..6ba6d5cdb3 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/distro/connect-converter/pom.xml b/distro/connect-converter/pom.xml index 2d7e49dcd0..48ae1f2693 100644 --- a/distro/connect-converter/pom.xml +++ b/distro/connect-converter/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry-distro - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/distro/docker-compose/pom.xml b/distro/docker-compose/pom.xml index d3cb671b92..82d4352444 100644 --- a/distro/docker-compose/pom.xml +++ b/distro/docker-compose/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry-distro - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/distro/docker/pom.xml b/distro/docker/pom.xml index 1dbd0c1325..a8e08df117 100644 --- a/distro/docker/pom.xml +++ b/distro/docker/pom.xml @@ -5,7 +5,7 @@ 
io.apicurio apicurio-registry-distro - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/distro/openshift-template/pom.xml b/distro/openshift-template/pom.xml index 87c9668de0..b5c9afba9b 100644 --- a/distro/openshift-template/pom.xml +++ b/distro/openshift-template/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry-distro - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/distro/pom.xml b/distro/pom.xml index 7e638df2aa..8000d2521a 100644 --- a/distro/pom.xml +++ b/distro/pom.xml @@ -4,7 +4,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml apicurio-registry-distro diff --git a/docs/antora.yml b/docs/antora.yml index a995c41cd3..bb57e84cac 100644 --- a/docs/antora.yml +++ b/docs/antora.yml @@ -1,6 +1,6 @@ name: apicurio-registry title: Apicurio Registry -version: '1.3.2.Final' +version: '1.3.3.Final' start_ROOT: ROOT:index.adoc nav: - modules/ROOT/nav.adoc diff --git a/docs/pom.xml b/docs/pom.xml index 6d9d377674..f3360f08fe 100644 --- a/docs/pom.xml +++ b/docs/pom.xml @@ -8,7 +8,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/docs/rest-api/pom.xml b/docs/rest-api/pom.xml index 9210fe96d8..9bb6e266ff 100644 --- a/docs/rest-api/pom.xml +++ b/docs/rest-api/pom.xml @@ -8,7 +8,7 @@ io.apicurio apicurio-registry-docs - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/pom.xml b/pom.xml index b195cc3a97..675138635d 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT pom apicurio-registry diff --git a/rest-client/pom.xml b/rest-client/pom.xml index ce5ccf2eff..c62bb51d46 100644 --- a/rest-client/pom.xml +++ b/rest-client/pom.xml @@ -5,7 +5,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml 4.0.0 diff --git a/search/client/pom.xml b/search/client/pom.xml index 4262ecc6a1..18d22ae03d 100644 --- a/search/client/pom.xml +++ b/search/client/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 
1.3.3-SNAPSHOT ../../pom.xml diff --git a/search/connector/pom.xml b/search/connector/pom.xml index a99e410ddc..005beaf930 100644 --- a/search/connector/pom.xml +++ b/search/connector/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/storage/asyncmem/pom.xml b/storage/asyncmem/pom.xml index aeb5fd7e05..fadddb946e 100644 --- a/storage/asyncmem/pom.xml +++ b/storage/asyncmem/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/storage/infinispan/pom.xml b/storage/infinispan/pom.xml index 2b7c8613d7..103c85c1f7 100644 --- a/storage/infinispan/pom.xml +++ b/storage/infinispan/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/storage/jpa/pom.xml b/storage/jpa/pom.xml index 5e49008c2c..9922795708 100644 --- a/storage/jpa/pom.xml +++ b/storage/jpa/pom.xml @@ -6,7 +6,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/storage/kafka/pom.xml b/storage/kafka/pom.xml index 4e43948d75..d160f663d0 100644 --- a/storage/kafka/pom.xml +++ b/storage/kafka/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/storage/streams/pom.xml b/storage/streams/pom.xml index f1e0a4461c..cc328304c6 100644 --- a/storage/streams/pom.xml +++ b/storage/streams/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/tests/pom.xml b/tests/pom.xml index 7b5e003054..25bc5fff0f 100644 --- a/tests/pom.xml +++ b/tests/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml apicurio-registry-tests diff --git a/ui/pom.xml b/ui/pom.xml index e2950c0f6d..40e21755b1 100644 --- a/ui/pom.xml +++ b/ui/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../pom.xml diff --git a/utils/converter/pom.xml 
b/utils/converter/pom.xml index 7c289dd8f4..d020c316dc 100644 --- a/utils/converter/pom.xml +++ b/utils/converter/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/utils/kafka/pom.xml b/utils/kafka/pom.xml index 5cc2c2de8f..3e99b3ae9a 100644 --- a/utils/kafka/pom.xml +++ b/utils/kafka/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/utils/maven-plugin/pom.xml b/utils/maven-plugin/pom.xml index e64c310978..9081f2ba4f 100644 --- a/utils/maven-plugin/pom.xml +++ b/utils/maven-plugin/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/utils/serde/pom.xml b/utils/serde/pom.xml index 30bd216733..27967bef7f 100644 --- a/utils/serde/pom.xml +++ b/utils/serde/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/utils/streams/pom.xml b/utils/streams/pom.xml index 47bc6f684f..76364ac481 100644 --- a/utils/streams/pom.xml +++ b/utils/streams/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/utils/tests/pom.xml b/utils/tests/pom.xml index 6d5528025d..80c9b05534 100644 --- a/utils/tests/pom.xml +++ b/utils/tests/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml diff --git a/utils/tools/pom.xml b/utils/tools/pom.xml index 0d02bfec36..1c1c326257 100644 --- a/utils/tools/pom.xml +++ b/utils/tools/pom.xml @@ -7,7 +7,7 @@ io.apicurio apicurio-registry - 1.3.2.Final + 1.3.3-SNAPSHOT ../../pom.xml From ec76f495392a1995e2cd6a7de2a6724a22684afe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ale=C5=A1=20Justin?= Date: Mon, 23 Nov 2020 15:13:24 +0100 Subject: [PATCH 22/31] Fix serializer to use the right (Registry's) schema. 
(#1021) --- .../utils/serde/AbstractKafkaSerializer.java | 18 +++++++ .../utils/serde/AvroKafkaDeserializer.java | 11 +---- .../utils/serde/AvroKafkaSerializer.java | 7 +++ .../serde/ProtobufKafkaDeserializer.java | 12 ++--- .../utils/serde/ProtobufKafkaSerializer.java | 7 +++ .../utils/serde/util/ResponseUtils.java | 47 +++++++++++++++++++ 6 files changed, 84 insertions(+), 18 deletions(-) create mode 100644 utils/serde/src/main/java/io/apicurio/registry/utils/serde/util/ResponseUtils.java diff --git a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AbstractKafkaSerializer.java b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AbstractKafkaSerializer.java index 1b17dd8e72..224e0c435d 100644 --- a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AbstractKafkaSerializer.java +++ b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AbstractKafkaSerializer.java @@ -24,6 +24,7 @@ import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.serialization.Serializer; +import javax.ws.rs.core.Response; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStream; @@ -34,6 +35,8 @@ */ public abstract class AbstractKafkaSerializer> extends AbstractKafkaStrategyAwareSerDe implements Serializer { + private SchemaCache cache; + public AbstractKafkaSerializer() { this(null); } @@ -50,6 +53,20 @@ public AbstractKafkaSerializer( super(client, artifactIdStrategy, globalIdStrategy); } + public synchronized SchemaCache getCache() { + if (cache == null) { + cache = new SchemaCache(getClient()) { + @Override + protected T toSchema(Response response) { + return readSchema(response); + } + }; + } + return cache; + } + + protected abstract T readSchema(Response response); + protected abstract T toSchema(U data); protected abstract ArtifactType artifactType(); @@ -73,6 +90,7 @@ public byte[] serialize(String topic, Headers headers, U data) { T schema = toSchema(data); String artifactId = 
getArtifactIdStrategy().artifactId(topic, isKey(), schema); long id = getGlobalIdStrategy().findId(getClient(), artifactId, artifactType(), schema); + schema = getCache().getSchema(id); // use registry's schema! ByteArrayOutputStream out = new ByteArrayOutputStream(); if (headerUtils != null) { headerUtils.addSchemaHeaders(headers, artifactId, id); diff --git a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AvroKafkaDeserializer.java b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AvroKafkaDeserializer.java index 347cb6c188..fc209940ef 100644 --- a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AvroKafkaDeserializer.java +++ b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AvroKafkaDeserializer.java @@ -18,11 +18,10 @@ package io.apicurio.registry.utils.serde; import io.apicurio.registry.client.RegistryService; -import io.apicurio.registry.utils.IoUtil; import io.apicurio.registry.utils.serde.avro.AvroDatumProvider; -import io.apicurio.registry.utils.serde.avro.AvroSchemaUtils; import io.apicurio.registry.utils.serde.avro.DefaultAvroDatumProvider; import io.apicurio.registry.utils.serde.util.HeaderUtils; +import io.apicurio.registry.utils.serde.util.ResponseUtils; import org.apache.avro.Schema; import org.apache.avro.io.DatumReader; import org.apache.avro.io.DecoderFactory; @@ -31,7 +30,6 @@ import javax.ws.rs.core.Response; import java.io.ByteArrayInputStream; import java.io.IOException; -import java.io.InputStream; import java.io.UncheckedIOException; import java.nio.ByteBuffer; import java.util.Map; @@ -79,12 +77,7 @@ public void configure(Map configs, boolean isKey) { @Override protected Schema toSchema(Response response) { - Object responseEntity = response.getEntity(); - if (responseEntity instanceof InputStream) { - return AvroSchemaUtils.parse(IoUtil.toString((InputStream) responseEntity)); - } else { - return AvroSchemaUtils.parse(response.readEntity(String.class)); - } + return 
ResponseUtils.toAvroSchema(response); } @Override diff --git a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AvroKafkaSerializer.java b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AvroKafkaSerializer.java index 96d87b76f0..474dce1a7d 100644 --- a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AvroKafkaSerializer.java +++ b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AvroKafkaSerializer.java @@ -25,6 +25,7 @@ import io.apicurio.registry.utils.serde.strategy.ArtifactIdStrategy; import io.apicurio.registry.utils.serde.strategy.GlobalIdStrategy; import io.apicurio.registry.utils.serde.util.HeaderUtils; +import io.apicurio.registry.utils.serde.util.ResponseUtils; import io.apicurio.registry.utils.serde.util.Utils; import org.apache.avro.Schema; import org.apache.avro.io.DatumWriter; @@ -32,6 +33,7 @@ import org.apache.avro.io.EncoderFactory; import org.apache.kafka.common.header.Headers; +import javax.ws.rs.core.Response; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStream; @@ -87,6 +89,11 @@ public void configure(Map configs, boolean isKey) { avroDatumProvider.configure(configs); } + @Override + protected Schema readSchema(Response response) { + return ResponseUtils.toAvroSchema(response); + } + @Override protected Schema toSchema(U data) { return avroDatumProvider.toSchema(data); diff --git a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/ProtobufKafkaDeserializer.java b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/ProtobufKafkaDeserializer.java index 5adc95738c..73cb21be14 100644 --- a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/ProtobufKafkaDeserializer.java +++ b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/ProtobufKafkaDeserializer.java @@ -20,16 +20,15 @@ import com.google.protobuf.DynamicMessage; import io.apicurio.registry.client.RegistryService; import io.apicurio.registry.common.proto.Serde; -import 
io.apicurio.registry.utils.IoUtil; +import io.apicurio.registry.utils.serde.util.ResponseUtils; import org.apache.kafka.common.header.Headers; +import javax.ws.rs.core.Response; import java.io.ByteArrayInputStream; import java.io.IOException; -import java.io.InputStream; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; -import javax.ws.rs.core.Response; /** * @author Ales Justin @@ -45,12 +44,7 @@ public ProtobufKafkaDeserializer(RegistryService client) { @Override protected byte[] toSchema(Response response) { - Object responseEntity = response.getEntity(); - if (responseEntity instanceof InputStream) { - return IoUtil.toBytes((InputStream) responseEntity); - } else { - return response.readEntity(byte[].class); - } + return ResponseUtils.toBytesSchema(response); } @Override diff --git a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/ProtobufKafkaSerializer.java b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/ProtobufKafkaSerializer.java index d5e05bfd10..917ad0213c 100644 --- a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/ProtobufKafkaSerializer.java +++ b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/ProtobufKafkaSerializer.java @@ -23,8 +23,10 @@ import io.apicurio.registry.types.ArtifactType; import io.apicurio.registry.utils.serde.strategy.ArtifactIdStrategy; import io.apicurio.registry.utils.serde.strategy.GlobalIdStrategy; +import io.apicurio.registry.utils.serde.util.ResponseUtils; import org.apache.kafka.common.header.Headers; +import javax.ws.rs.core.Response; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.OutputStream; @@ -45,6 +47,11 @@ public ProtobufKafkaSerializer(RegistryService client, ArtifactIdStrategy Date: Wed, 25 Nov 2020 20:22:32 +0100 Subject: [PATCH 23/31] Add check-period to globalId strategy. (#1025) * Add check-period to globalId strategy. * Add check period test and docs. 
--- .../apicurio/registry/RegistrySerdeTest.java | 50 +++++++++-- .../con-registry-serdes-strategy.adoc | 11 +++ .../AbstractKafkaStrategyAwareSerDe.java | 2 + .../strategy/AbstractCrudIdStrategy.java | 8 +- .../serde/strategy/CheckPeriodIdStrategy.java | 85 +++++++++++++++++++ .../strategy/FindBySchemaIdStrategy.java | 4 +- .../serde/strategy/FindLatestIdStrategy.java | 4 +- .../serde/strategy/GlobalIdStrategy.java | 10 +++ 8 files changed, 158 insertions(+), 16 deletions(-) create mode 100644 utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/CheckPeriodIdStrategy.java diff --git a/app/src/test/java/io/apicurio/registry/RegistrySerdeTest.java b/app/src/test/java/io/apicurio/registry/RegistrySerdeTest.java index f0d299127b..572b045991 100644 --- a/app/src/test/java/io/apicurio/registry/RegistrySerdeTest.java +++ b/app/src/test/java/io/apicurio/registry/RegistrySerdeTest.java @@ -24,8 +24,10 @@ import io.apicurio.registry.support.Tester; import io.apicurio.registry.types.ArtifactType; import io.apicurio.registry.utils.ConcurrentUtil; +import io.apicurio.registry.utils.IoUtil; import io.apicurio.registry.utils.serde.AbstractKafkaSerDe; import io.apicurio.registry.utils.serde.AbstractKafkaSerializer; +import io.apicurio.registry.utils.serde.AbstractKafkaStrategyAwareSerDe; import io.apicurio.registry.utils.serde.AvroEncoding; import io.apicurio.registry.utils.serde.AvroKafkaDeserializer; import io.apicurio.registry.utils.serde.AvroKafkaSerializer; @@ -94,7 +96,8 @@ public void testGetOrCreate(Supplier supplier) throws Exception Schema schema = new Schema.Parser().parse("{\"type\":\"record\",\"name\":\"myrecord3\",\"fields\":[{\"name\":\"bar\",\"type\":\"string\"}]}"); String artifactId = generateArtifactId(); - CompletionStage csa = client.createArtifact(ArtifactType.AVRO, artifactId, null, new ByteArrayInputStream(schema.toString().getBytes(StandardCharsets.UTF_8))); + byte[] schemaContent = IoUtil.toBytes(schema.toString()); + CompletionStage csa = 
client.createArtifact(ArtifactType.AVRO, artifactId, null, new ByteArrayInputStream(schemaContent)); ArtifactMetaData amd = ConcurrentUtil.result(csa); this.waitForGlobalId(amd.getGlobalId()); @@ -127,6 +130,37 @@ public void testCachedSchema(Supplier supplier) throws Exceptio Assertions.assertEquals(id, idStrategy.findId(service, artifactId, ArtifactType.AVRO, schema)); } + @RegistryServiceTest + public void testCheckPeriod(Supplier supplier) throws Exception { + RegistryService service = supplier.get(); + + Schema schema = new Schema.Parser().parse("{\"type\":\"record\",\"name\":\"myrecord5x\",\"fields\":[{\"name\":\"bar\",\"type\":\"string\"}]}"); + String artifactId = generateArtifactId(); + byte[] schemaContent = IoUtil.toBytes(schema.toString()); + CompletionStage csa = service.createArtifact(ArtifactType.AVRO, artifactId, null, new ByteArrayInputStream(schemaContent)); + ConcurrentUtil.result(csa); + + long pc = 5000L; // 5seconds check period ... + + Map config = new HashMap<>(); + config.put(AbstractKafkaStrategyAwareSerDe.REGISTRY_CHECK_PERIOD_MS_CONFIG_PARAM, String.valueOf(pc)); + GlobalIdStrategy idStrategy = new FindLatestIdStrategy<>(); + idStrategy.configure(config, false); + + long id1 = idStrategy.findId(service, artifactId, ArtifactType.AVRO, schema); + service.reset(); + long id2 = idStrategy.findId(service, artifactId, ArtifactType.AVRO, schema); + service.reset(); + Assertions.assertEquals(id1, id2); // should be less than 5seconds ... 
+ retry(() -> service.getArtifactMetaDataByGlobalId(id2)); + + service.updateArtifact(artifactId, ArtifactType.AVRO, new ByteArrayInputStream(schemaContent)); + Thread.sleep(pc + 1); + retry(() -> Assertions.assertNotEquals(id2, service.getArtifactMetaData(artifactId).getGlobalId())); + + Assertions.assertNotEquals(id2, idStrategy.findId(service, artifactId, ArtifactType.AVRO, schema)); + } + @SuppressWarnings("unchecked") @RegistryServiceTest public void testConfiguration(Supplier supplier) throws Exception { @@ -194,7 +228,7 @@ record = deserializer.deserialize(artifactId, bytes); @RegistryServiceTest public void testAvro(Supplier supplier) throws Exception { Schema schema = new Schema.Parser().parse("{\"type\":\"record\",\"name\":\"myrecord3\",\"fields\":[{\"name\":\"bar\",\"type\":\"string\"}]}"); - try (AvroKafkaSerializer serializer = new AvroKafkaSerializer(supplier.get()); + try (AvroKafkaSerializer serializer = new AvroKafkaSerializer<>(supplier.get()); Deserializer deserializer = new AvroKafkaDeserializer<>(supplier.get())) { serializer.setGlobalIdStrategy(new AutoRegisterIdStrategy<>()); @@ -218,9 +252,9 @@ public void testAvro(Supplier supplier) throws Exception { @RegistryServiceTest public void testAvroJSON(Supplier supplier) throws Exception { Schema schema = new Schema.Parser().parse("{\"type\":\"record\",\"name\":\"myrecord3\",\"fields\":[{\"name\":\"bar\",\"type\":\"string\"}]}"); - try (AvroKafkaSerializer serializer = new AvroKafkaSerializer(supplier.get()); + try (AvroKafkaSerializer serializer = new AvroKafkaSerializer<>(supplier.get()); Deserializer deserializer = new AvroKafkaDeserializer<>(supplier.get())) { - HashMap config = new HashMap(); + HashMap config = new HashMap<>(); config.put(AvroEncoding.AVRO_ENCODING, AvroEncoding.AVRO_JSON); serializer.configure(config,false); deserializer.configure(config, false); @@ -250,11 +284,11 @@ public void testAvroJSON(Supplier supplier) throws Exception { @RegistryServiceTest public void 
testAvroUsingHeaders(Supplier supplier) throws Exception { Schema schema = new Schema.Parser().parse("{\"type\":\"record\",\"name\":\"myrecord3\",\"fields\":[{\"name\":\"bar\",\"type\":\"string\"}]}"); - try (AvroKafkaSerializer serializer = new AvroKafkaSerializer(supplier.get()); + try (AvroKafkaSerializer serializer = new AvroKafkaSerializer<>(supplier.get()); Deserializer deserializer = new AvroKafkaDeserializer<>(supplier.get())) { serializer.setGlobalIdStrategy(new AutoRegisterIdStrategy<>()); - HashMap config = new HashMap(); + HashMap config = new HashMap<>(); config.put(AbstractKafkaSerDe.USE_HEADERS, "true"); serializer.configure(config,false); deserializer.configure(config, false); @@ -284,8 +318,8 @@ public void testAvroUsingHeaders(Supplier supplier) throws Exce @RegistryServiceTest public void testAvroReflect(Supplier supplier) throws Exception { - try (AvroKafkaSerializer serializer = new AvroKafkaSerializer(supplier.get()); - AvroKafkaDeserializer deserializer = new AvroKafkaDeserializer(supplier.get())) { + try (AvroKafkaSerializer serializer = new AvroKafkaSerializer<>(supplier.get()); + AvroKafkaDeserializer deserializer = new AvroKafkaDeserializer<>(supplier.get())) { serializer.setGlobalIdStrategy(new AutoRegisterIdStrategy<>()); serializer.setAvroDatumProvider(new ReflectAvroDatumProvider<>()); diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc index 32352d5d77..ecb796226a 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc @@ -64,5 +64,16 @@ Strategies to return a global ID based on an implementation of `GlobalIdStrategy `FindLatestIdStrategy`:: Strategy that returns the global ID of the latest schema version, based on an artifact ID. 
`FindBySchemaIdStrategy`:: Strategy that matches schema content, based on an artifact ID, to return a global ID. +`CachedSchemaIdStrategy`:: Strategy that caches the schema, and uses the global ID of the cached schema. `GetOrCreateIdStrategy`:: Strategy that tries to get the latest schema, based on an artifact ID, and if it does not exist, it creates a new schema. `AutoRegisterIdStrategy`:: Strategy that updates the schema, and uses the global ID of the updated schema. + +[discrete] +[id='configuring-globalid-strategy-{context}'] +== Configuring global ID strategy +You can configure the following application property: + +* apicurio.registry.check-period-ms -- set remote lookup period in milliseconds + +You can configure application properties as Java system properties or include them in the Quarkus +application.properties file. For more details, see the https://quarkus.io/guides/config#overriding-properties-at-runtime[Quarkus documentation]. \ No newline at end of file diff --git a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AbstractKafkaStrategyAwareSerDe.java b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AbstractKafkaStrategyAwareSerDe.java index 3042ed851a..2d55e9357d 100644 --- a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AbstractKafkaStrategyAwareSerDe.java +++ b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AbstractKafkaStrategyAwareSerDe.java @@ -31,6 +31,7 @@ public abstract class AbstractKafkaStrategyAwareSerDe> extends AbstractKafkaSerDe { public static final String REGISTRY_ARTIFACT_ID_STRATEGY_CONFIG_PARAM = "apicurio.registry.artifact-id"; public static final String REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM = "apicurio.registry.global-id"; + public static final String REGISTRY_CHECK_PERIOD_MS_CONFIG_PARAM = "apicurio.registry.check-period-ms"; private ArtifactIdStrategy artifactIdStrategy; private GlobalIdStrategy globalIdStrategy; @@ -79,5 +80,6 @@ public void configure(Map configs, boolean 
isKey) { Object gis = configs.get(REGISTRY_GLOBAL_ID_STRATEGY_CONFIG_PARAM); instantiate(GlobalIdStrategy.class, gis, this::setGlobalIdStrategy); + getGlobalIdStrategy().configure(configs, isKey); } } diff --git a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/AbstractCrudIdStrategy.java b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/AbstractCrudIdStrategy.java index 1acb5b860f..54edda26e5 100644 --- a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/AbstractCrudIdStrategy.java +++ b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/AbstractCrudIdStrategy.java @@ -22,15 +22,15 @@ import io.apicurio.registry.types.ArtifactType; import io.apicurio.registry.utils.ConcurrentUtil; -import java.net.HttpURLConnection; -import java.util.concurrent.CompletionStage; import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.Response; +import java.net.HttpURLConnection; +import java.util.concurrent.CompletionStage; /** * @author Ales Justin */ -public abstract class AbstractCrudIdStrategy implements GlobalIdStrategy { +public abstract class AbstractCrudIdStrategy extends CheckPeriodIdStrategy { protected R unwrap(CompletionStage cs) { return ConcurrentUtil.result(cs); @@ -46,7 +46,7 @@ protected void afterCreateArtifact(T schema, ArtifactMetaData amd) { } @Override - public long findId(RegistryService service, String artifactId, ArtifactType artifactType, T schema) { + long findIdInternal(RegistryService service, String artifactId, ArtifactType artifactType, T schema) { try { return initialLookup(service, artifactId, artifactType, schema); } catch (WebApplicationException e) { diff --git a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/CheckPeriodIdStrategy.java b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/CheckPeriodIdStrategy.java new file mode 100644 index 0000000000..7ba96fd3ef --- /dev/null +++ 
b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/CheckPeriodIdStrategy.java @@ -0,0 +1,85 @@ +/* + * Copyright 2020 Red Hat + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.apicurio.registry.utils.serde.strategy; + +import io.apicurio.registry.client.RegistryService; +import io.apicurio.registry.types.ArtifactType; +import io.apicurio.registry.utils.serde.AbstractKafkaStrategyAwareSerDe; + +import java.time.Duration; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * @author Ales Justin + */ +public abstract class CheckPeriodIdStrategy implements GlobalIdStrategy { + + static class CheckValue { + public CheckValue(long ts, long id) { + this.ts = ts; + this.id = id; + } + + long ts; + long id; + } + + private long checkPeriod; + private Map checkMap = new ConcurrentHashMap<>(); + + @Override + public void configure(Map configs, boolean isKey) { + Object cp = configs.get(AbstractKafkaStrategyAwareSerDe.REGISTRY_CHECK_PERIOD_MS_CONFIG_PARAM); + if (cp != null) { + long checkPeriodParam; + if (cp instanceof Number) { + checkPeriodParam = ((Number) cp).longValue(); + } else if (cp instanceof String) { + checkPeriodParam = Long.parseLong((String) cp); + } else if (cp instanceof Duration) { + checkPeriodParam = ((Duration) cp).toMillis(); + } else { + throw new IllegalArgumentException("Check period config param type unsupported: " + cp); + } + if (checkPeriodParam < 0) { + throw new 
IllegalArgumentException("Check period must be non-negative: " + checkPeriodParam); + } + this.checkPeriod = checkPeriodParam; + } + } + + abstract long findIdInternal(RegistryService service, String artifactId, ArtifactType artifactType, T schema); + + public long findId(RegistryService service, String artifactId, ArtifactType artifactType, T schema) { + CheckValue cv = checkMap.compute(artifactId, (aID, v) -> { + long now = System.currentTimeMillis(); + if (v == null) { + long id = findIdInternal(service, artifactId, artifactType, schema); + return new CheckValue(now, id); + } else { + if (v.ts + checkPeriod < now) { + long id = findIdInternal(service, artifactId, artifactType, schema); + v.ts = now; + v.id = id; + } + return v; + } + }); + return cv.id; + } +} diff --git a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/FindBySchemaIdStrategy.java b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/FindBySchemaIdStrategy.java index f0db4b2c40..a43c7dace7 100644 --- a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/FindBySchemaIdStrategy.java +++ b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/FindBySchemaIdStrategy.java @@ -23,9 +23,9 @@ /** * @author Ales Justin */ -public class FindBySchemaIdStrategy implements GlobalIdStrategy { +public class FindBySchemaIdStrategy extends CheckPeriodIdStrategy { @Override - public long findId(RegistryService service, String artifactId, ArtifactType artifactType, T schema) { + long findIdInternal(RegistryService service, String artifactId, ArtifactType artifactType, T schema) { ArtifactMetaData amd = service.getArtifactMetaDataByContent(artifactId, toStream(schema)); return amd.getGlobalId(); } diff --git a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/FindLatestIdStrategy.java b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/FindLatestIdStrategy.java index 43be8a3fab..d0b923ac84 100644 --- 
a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/FindLatestIdStrategy.java +++ b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/FindLatestIdStrategy.java @@ -23,9 +23,9 @@ /** * @author Ales Justin */ -public class FindLatestIdStrategy implements GlobalIdStrategy { +public class FindLatestIdStrategy extends CheckPeriodIdStrategy { @Override - public long findId(RegistryService service, String artifactId, ArtifactType artifactType, T schema) { + long findIdInternal(RegistryService service, String artifactId, ArtifactType artifactType, T schema) { ArtifactMetaData amd = service.getArtifactMetaData(artifactId); return amd.getGlobalId(); } diff --git a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/GlobalIdStrategy.java b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/GlobalIdStrategy.java index 927234f05c..cbd4da63ae 100644 --- a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/GlobalIdStrategy.java +++ b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/strategy/GlobalIdStrategy.java @@ -22,6 +22,7 @@ import java.io.ByteArrayInputStream; import java.io.InputStream; +import java.util.Map; /** * A {@link GlobalIdStrategy} is used by the Kafka serializer/deserializer @@ -43,6 +44,15 @@ public interface GlobalIdStrategy { */ long findId(RegistryService service, String artifactId, ArtifactType artifactType, T schema); + /** + * Configure, if supported. + * + * @param configs the configs + * @param isKey are we handling key or value + */ + default void configure(Map configs, boolean isKey) { + } + /** * Create InputStream from schema. * By default we just take string bytes. 
From 90d9167ac327d7b991a22e529a37c438169040db Mon Sep 17 00:00:00 2001 From: Stephen McCarthy <29098561+smccarthy-ie@users.noreply.github.com> Date: Wed, 2 Dec 2020 18:18:37 +0000 Subject: [PATCH 24/31] Doc release clean up (#1050) * fix upstream branding and tidy up xref attributes * restructure and tidy up schema look up strategies * minor tidy up --- .../assembly-registry-reference.adoc | 2 +- .../con-registry-serdes-strategy.adoc | 61 +++++++++---------- .../proc-adding-artifacts-using-console.adoc | 4 +- ...proc-browsing-artifacts-using-console.adoc | 2 +- .../proc-configuring-rules-using-console.adoc | 6 +- .../proc-setting-up-infinispan-storage.adoc | 2 +- ...proc-setting-up-kafka-streams-storage.adoc | 2 +- .../proc-setting-up-postgresql-storage.adoc | 2 +- .../ref-registry-artifact-metadata.adoc | 2 +- .../ROOT/partials/shared/attributes.adoc | 33 +++++----- 10 files changed, 60 insertions(+), 56 deletions(-) diff --git a/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc b/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc index ae52869c66..c0f72b0d45 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-registry-reference.adoc @@ -1,7 +1,7 @@ // Metadata created by nebel include::{mod-loc}shared/all-attributes.adoc[] -[id="artifact-and-rule-types"] +[id="registry-artifact-reference"] = {registry} artifact reference //If the assembly covers a task, start the title with a verb in the gerund form, such as Creating or Configuring. 
diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc index ecb796226a..28aa050d3f 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc @@ -4,76 +4,75 @@ [id='registry-serdes-concepts-strategy-{context}'] = Strategies to look up a schema -The Kafka client serializer uses two lookup _strategies_ to determine the artifact ID and global ID under which the message schema is registered in {registry}. +The Kafka client serializer uses _lookup strategies_ to determine the artifact ID and the global ID under which the message schema is registered in {registry}. For a given topic and message, you can use implementations of the following Java interfaces: * `ArtifactIdStrategy` to return an artifact ID * `GlobalIdStrategy` to return a global ID +The classes for each strategy are organized in the `io.apicurio.registry.utils.serde.strategy` package. The default strategy is the artifact ID `TopicIdStrategy`, which looks for {registry} artifacts with the same name as the Kafka topic receiving messages. + +.Example +[source,java,subs="+quotes,attributes"] +---- +public String artifactId(String topic, boolean isKey, T schema) { + return String.format("%s-%s", topic, isKey ? "key" : "value"); +} +---- + +* The `topic` parameter is the name of the Kafka topic receiving the message. +* The `isKey` parameter is `true` when the message key is being serialized, and `false` when the message value is being serialized. +* The `schema` parameter is the schema of the message being serialized or deserialized. +* The `artifactID` returned is the artifact ID under which the schema is registered in {registry}. + +Which lookup strategy you use depends on how and where you store your schema. 
For example, you might use a strategy that uses a _record ID_ if you have different Kafka topics with the same Avro message type. + [discrete] == Artifact ID strategy -The artifact ID strategy provides a way to map the Kafka topic and message information to the ID of an artifact in {registry}. The common convention for the mapping is to combine the Kafka topic name with either `key` or `value`, depending on whether the serializer is being used for the Kafka message key or value). +The artifact ID strategy provides a way to map the Kafka topic and message information to an artifact ID in {registry}. The common convention for the mapping is to combine the Kafka topic name with the `key` or `value`, depending on whether the serializer is used for the Kafka message key or value. -However, you can use alternative conventions for the mapping, either by using an alternative strategy provided by {registry} or by creating a custom Java class that implements `io.apicurio.registry.utils.serde.strategy.ArtifactIdStrategy`. +However, you can use alternative conventions for the mapping by using a strategy provided by {registry}, or by creating a custom Java class that implements `io.apicurio.registry.utils.serde.strategy.ArtifactIdStrategy`. [discrete] [id='service-registry-concepts-artifactid-{context}'] == Strategies to return an artifact ID -Strategies to return an artifact ID based on an implementation of `ArtifactIdStrategy`: +{registry} provides the following strategies to return an artifact ID based on an implementation of `ArtifactIdStrategy`: `RecordIdStrategy`:: Avro-specific strategy that uses the full name of the schema. `TopicRecordIdStrategy`:: Avro-specific strategy that uses the topic name and the full name of the schema. -`TopicIdStrategy`:: (Default) strategy that uses the topic name and `key` or `value` suffix. +`TopicIdStrategy`:: Default strategy that uses the topic name and `key` or `value` suffix. 
`SimpleTopicIdStrategy`:: Simple strategy that only uses the topic name. [discrete] == Global ID strategy -The global ID strategy locates and identifies the specific version of the schema registered under the artifact ID provided by the artifact ID strategy. Every version of every artifact has a single globally unique identifier that can be used to retrieve the content of that artifact. That global ID is what gets included in every Kafka message so that a deserializer can properly fetch the schema from {registry}. +The global ID strategy locates and identifies the specific version of the schema registered under the artifact ID provided by the artifact ID strategy. Every version of every artifact has a single globally unique identifier that can be used to retrieve the content of that artifact. This global ID is included in every Kafka message so that a deserializer can properly fetch the schema from {registry}. -The global ID strategy can either lookup an existing artifact version, or it can register one if not found, depending on which strategy is used. Additionally, you can provide your own strategy by creating a +The global ID strategy can look up an existing artifact version, or it can register one if not found, depending on which strategy is used. You can also provide your own strategy by creating a custom Java class that implements `io.apicurio.registry.utils.serde.strategy.GlobalIdStrategy`. -The classes for each strategy are organized in the `io.apicurio.registry.utils.serde.strategy` package. The default artifact ID strategy is `TopicIdStrategy`, which looks for {registry} artifacts with the same name as the Kafka topic receiving messages. - -.Example -[source,java,subs="+quotes,attributes"] ----- -public String artifactId(String topic, boolean isKey, T schema) { - return String.format("%s-%s", topic, isKey ? "key" : "value"); -} ----- - -* The `topic` parameter is the name of the Kafka topic receiving the message. 
-* The `isKey` parameter is _true_ when the message key is being serialized, and _false_ when the message value is being serialized. -* The `schema` parameter is the schema of the message being serialized/deserialized. -* The `artifactID` returned is the artifact ID under which the schema is registered in {registry}. - -What lookup strategy you use depends on how and where you store your schema. -For example, you might use a strategy that uses a _record ID_ if you have different Kafka topics with the same Avro message type. - [discrete] [id='service-registry-concepts-globalid-{context}'] == Strategies to return a global ID -Strategies to return a global ID based on an implementation of `GlobalIdStrategy`: +{registry} provides the following strategies to return a global ID based on an implementation of `GlobalIdStrategy`: `FindLatestIdStrategy`:: Strategy that returns the global ID of the latest schema version, based on an artifact ID. `FindBySchemaIdStrategy`:: Strategy that matches schema content, based on an artifact ID, to return a global ID. `CachedSchemaIdStrategy`:: Strategy that caches the schema, and uses the global ID of the cached schema. -`GetOrCreateIdStrategy`:: Strategy that tries to get the latest schema, based on an artifact ID, and if it does not exist, it creates a new schema. +`GetOrCreateIdStrategy`:: Strategy that tries to get the latest schema, based on an artifact ID, and if it does not exist, creates a new schema. `AutoRegisterIdStrategy`:: Strategy that updates the schema, and uses the global ID of the updated schema. 
[discrete] [id='configuring-globalid-strategy-{context}'] -== Configuring global ID strategy -You can configure the following application property: +== Configuring the global ID strategy +You can configure the following application property for the global ID strategy: -* apicurio.registry.check-period-ms -- set remote lookup period in milliseconds +* `apicurio.registry.check-period-ms`: Configures the remote schema lookup period in milliseconds You can configure application properties as Java system properties or include them in the Quarkus -application.properties file. For more details, see the https://quarkus.io/guides/config#overriding-properties-at-runtime[Quarkus documentation]. \ No newline at end of file +application.properties file. For more details, see the https://quarkus.io/guides/config#overriding-properties-at-runtime[Quarkus documentation]. diff --git a/docs/modules/ROOT/partials/getting-started/proc-adding-artifacts-using-console.adoc b/docs/modules/ROOT/partials/getting-started/proc-adding-artifacts-using-console.adoc index 6393279667..1ba3122b6a 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-adding-artifacts-using-console.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-adding-artifacts-using-console.adoc @@ -4,7 +4,7 @@ [id="adding-artifacts-using-console"] = Adding artifacts using the {registry} web console -You can use the {registry} web console to upload event schema and API design artifacts to the registry. For more details on the artifact types that you can upload, see {registry-artifact-types}. This section shows simple examples of uploading {registry} artifacts, applying artifact rules, and adding new artifact versions. +You can use the {registry} web console to upload event schema and API design artifacts to the registry. For more details on the artifact types that you can upload, see {registry-reference}. 
This section shows simple examples of uploading {registry} artifacts, applying artifact rules, and adding new artifact versions. .Prerequisites @@ -32,7 +32,7 @@ image::images/getting-started/registry-web-console-artifact.png[Artifact Details ** *Documentation* (OpenAPI only): Displays automatically-generated REST API documentation. ** *Content*: Displays a read-only view of the full artifact content. -. In *Content Rules*, click *Enable* to configure a *Validity Rule* or *Compatibility Rule*, and select the appropriate rule configuration from the drop-down. For more details, see {registry-rule-types}. +. In *Content Rules*, click *Enable* to configure a *Validity Rule* or *Compatibility Rule*, and select the appropriate rule configuration from the drop-down. For more details, see {registry-reference}. . Click *Upload new version* to add a new artifact version, and drag and drop or click *Browse* to upload the file, for example, `my-schema.json` or `my-openapi.json`. diff --git a/docs/modules/ROOT/partials/getting-started/proc-browsing-artifacts-using-console.adoc b/docs/modules/ROOT/partials/getting-started/proc-browsing-artifacts-using-console.adoc index cdb0c119c7..a15957cc9e 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-browsing-artifacts-using-console.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-browsing-artifacts-using-console.adoc @@ -4,7 +4,7 @@ [id="browsing-artifacts-using-console"] = Viewing artifacts using the {registry} web console -You can use the {registry} web console to browse the event schema and API design artifacts stored in the registry. This section shows simple examples of viewing {registry} artifacts, versions, and artifact rules. For more details on the artifact types stored in the registry, see {registry-artifact-types}. +You can use the {registry} web console to browse the event schema and API design artifacts stored in the registry. 
This section shows simple examples of viewing {registry} artifacts, versions, and artifact rules. For more details on the artifact types stored in the registry, see {registry-reference}. .Prerequisites diff --git a/docs/modules/ROOT/partials/getting-started/proc-configuring-rules-using-console.adoc b/docs/modules/ROOT/partials/getting-started/proc-configuring-rules-using-console.adoc index 5452006d9e..1d1643d983 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-configuring-rules-using-console.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-configuring-rules-using-console.adoc @@ -6,7 +6,7 @@ You can use the {registry} web console to configure optional rules to prevent invalid content from being added to the registry. All configured artifact rules or global rules must pass before a new artifact version can be uploaded to the registry. Configured artifact rules override any configured global rules. For more details, see {registry-rules}. -This section shows a simple example of configuring global and artifact rules. For details on the different rule types and associated configuration settings that you can select, see {registry-rule-types}. +This section shows a simple example of configuring global and artifact rules. For details on the different rule types and associated configuration settings that you can select, see {registry-reference}. .Prerequisites @@ -23,12 +23,12 @@ This section shows a simple example of configuring global and artifact rules. Fo . Click *View artifact* to view the *Artifact Details*. -. In *Content Rules*, click *Enable* to configure an artifact *Validity Rule* or *Compatibility Rule*, and select the appropriate rule configuration from the drop-down. For more details, see {registry-rule-types}. +. In *Content Rules*, click *Enable* to configure an artifact *Validity Rule* or *Compatibility Rule*, and select the appropriate rule configuration from the drop-down. For more details, see {registry-reference}. 
+ .Configure content rules in {registry} web console image::images/getting-started/registry-web-console-rules.png[Configure rules in Registry web console] + -. For global rules, click the *Settings* cog icon at the top right of the toolbar, and click *Enable* to configure a global *Validity Rule* or *Compatibility Rule*, and select the appropriate rule configuration from the drop-down. For more details, see {registry-rule-types}. +. For global rules, click the *Settings* cog icon at the top right of the toolbar, and click *Enable* to configure a global *Validity Rule* or *Compatibility Rule*, and select the appropriate rule configuration from the drop-down. For more details, see {registry-reference}. . To disable an artifact rule or global rule, click the trash icon next to the rule. diff --git a/docs/modules/ROOT/partials/getting-started/proc-setting-up-infinispan-storage.adoc b/docs/modules/ROOT/partials/getting-started/proc-setting-up-infinispan-storage.adoc index d378794aab..5a0e9a8fd5 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-setting-up-infinispan-storage.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-setting-up-infinispan-storage.adoc @@ -10,7 +10,7 @@ This section explains how to configure Infinispan cache-based storage for {regis .Prerequisites * You must have an OpenShift cluster with cluster administrator access. -* You must have already installed {registry}. See xref:installing-registry-operatorhub[]. +* You must have already installed {registry}. See {installing-the-registry-openshift}. 
.Procedure diff --git a/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc b/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc index 9e18cc1c69..20964cc748 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc @@ -9,7 +9,7 @@ This section explains how to configure Kafka-based storage for {registry} using .Prerequisites * You must have an OpenShift cluster with cluster administrator access. -* You must have already installed {registry}. See xref:installing-registry-operatorhub[]. +* You must have already installed {registry}. See {installing-the-registry-openshift}. * You must have already installed {kafka-streams}. See xref:installing-kafka-streams-operatorhub[]. .Procedure diff --git a/docs/modules/ROOT/partials/getting-started/proc-setting-up-postgresql-storage.adoc b/docs/modules/ROOT/partials/getting-started/proc-setting-up-postgresql-storage.adoc index d29d3390fb..06792e0330 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-setting-up-postgresql-storage.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-setting-up-postgresql-storage.adoc @@ -10,7 +10,7 @@ This section explains how to configure Java Persistence API-based storage for {r .Prerequisites * You must have an OpenShift cluster with cluster administrator access. -* You must have already installed {registry}. See xref:installing-registry-operatorhub[]. +* You must have already installed {registry}. See {installing-the-registry-openshift}. * You must have already installed a PostgreSQL Operator on OpenShift. For example, see xref:installing-postgresql-operatorhub[]. 
.Procedure diff --git a/docs/modules/ROOT/partials/getting-started/ref-registry-artifact-metadata.adoc b/docs/modules/ROOT/partials/getting-started/ref-registry-artifact-metadata.adoc index 9ea0a721f7..5259fbacbe 100644 --- a/docs/modules/ROOT/partials/getting-started/ref-registry-artifact-metadata.adoc +++ b/docs/modules/ROOT/partials/getting-started/ref-registry-artifact-metadata.adoc @@ -55,4 +55,4 @@ a| true * You can edit the `state` property only by using the state transition API. For example, you can mark an artifact as `deprecated` or `disabled`. .Additional resources -For more details, see the `/artifacts/{artifactId}/meta` sections in the link:{attachmentsdir}/registry-rest-api.htm[Apicurio Registry REST API documentation]. +For more details, see the `/artifacts/\{artifactId\}/meta` sections in the link:{attachmentsdir}/registry-rest-api.htm[Apicurio Registry REST API documentation]. diff --git a/docs/modules/ROOT/partials/shared/attributes.adoc b/docs/modules/ROOT/partials/shared/attributes.adoc index f340f3ec41..22b258d364 100644 --- a/docs/modules/ROOT/partials/shared/attributes.adoc +++ b/docs/modules/ROOT/partials/shared/attributes.adoc @@ -8,35 +8,42 @@ // Branding - toggle upstream/downstream content "on/off" -// The following attributes conditionalize content from the Apicurio upstream project: +// The following attributes conditionalize content from the Apicurio Registry project: // * Upstream-only content tagged with ifdef::apicurio-registry[]...endif::[] -// * Downstream-only content agged with ifdef::rh-service-registry[]...endif::[] +// * Downstream-only content tagged with ifdef::rh-service-registry[]...endif::[] // Untagged content is common +// Upstream condition by default, switch on/off downstream-only +//:service-registry-downstream: + // upstream -//:apicurio-registry: -//:registry: Apicurio Registry -//:kafka-streams: Strimzi -//:registry-version: 1.3 +ifndef::service-registry-downstream[] +:apicurio-registry: +:registry: Apicurio 
Registry +:kafka-streams: Strimzi +:registry-version: 1.3 +endif::[] // downstream +ifdef::service-registry-downstream[] :rh-service-registry: :registry: Service Registry :kafka-streams: AMQ Streams :registry-version: 1.1 -:registry-ocp-version: 4.5 +endif::[] + +//common :version: 2020-Q4 :attachmentsdir: files +:registry-ocp-version: 4.5 +:context: registry + //integration products :fuse-version: 7.7 :amq-version: 7.7 :3scale-version: 2.9 -//common -:registry-ocp-version: 4.5 -:context: registry - // Characters :copy: © :infin: ∞ @@ -57,11 +64,9 @@ include::attributes-links.adoc[] // internal links :registry-overview: xref:intro-to-the-registry[] :registry-rules: xref:intro-to-registry-rules[] -:registry-artifact-types: xref:artifact-and-rule-types[] -:registry-rule-types: xref:artifact-and-rule-types[] +:registry-reference: xref:registry-artifact-reference[] :managing-registry-artifacts-ui: xref:managing-registry-artifacts-ui[] :installing-the-registry-openshift: xref:installing-registry-ocp[] :installing-the-registry-storage-openshift: xref:installing-registry-streams-storage[] -:registry-reference: xref:artifact-and-rule-types[] :managing-registry-artifacts-api: xref:managing-registry-artifacts-api[] :kafka-client-serdes: xref:using-kafka-client-serdes[] From 8933e1f1af5bc78ba36339df9fa93b98aab86835 Mon Sep 17 00:00:00 2001 From: Stephen McCarthy <29098561+smccarthy-ie@users.noreply.github.com> Date: Wed, 16 Dec 2020 20:19:02 +0000 Subject: [PATCH 25/31] clean up install steps based on verification testing with openshift 4.5/4.6 (#1082) --- docs/modules/ROOT/nav.adoc | 4 ++-- ...installing-registry-storage-openshift.adoc | 2 ++ .../con-registry-serdes-strategy.adoc | 4 ++-- ...-installing-kafka-streams-operatorhub.adoc | 14 +++++------ ...roc-installing-postgresql-operatorhub.adoc | 10 ++++---- .../proc-installing-registry-operatorhub.adoc | 20 ++++++++-------- ...proc-setting-up-kafka-streams-storage.adoc | 23 ------------------- 
.../ref-registry-kafka-topic-names.adoc | 23 +++++++++++++++++++ 8 files changed, 48 insertions(+), 52 deletions(-) create mode 100644 docs/modules/ROOT/partials/getting-started/ref-registry-kafka-topic-names.adoc diff --git a/docs/modules/ROOT/nav.adoc b/docs/modules/ROOT/nav.adoc index e4fe36299a..c0567d4667 100644 --- a/docs/modules/ROOT/nav.adoc +++ b/docs/modules/ROOT/nav.adoc @@ -1,8 +1,8 @@ * xref:getting-started/assembly-intro-to-the-registry.adoc[] * xref:getting-started/assembly-intro-to-registry-rules.adoc[] -ifdef:getting-started/:apicurio-registry[] +ifdef::apicurio-registry[] * xref:getting-started/assembly-installing-registry-docker.adoc[] -endif:getting-started/:[] +endif::[] * xref:getting-started/assembly-installing-registry-openshift.adoc[] * xref:getting-started/assembly-installing-registry-storage-openshift.adoc[] * xref:getting-started/assembly-configuring-the-registry.adoc[] diff --git a/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-storage-openshift.adoc b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-storage-openshift.adoc index e774fca8f1..85e9310ea1 100644 --- a/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-storage-openshift.adoc +++ b/docs/modules/ROOT/pages/getting-started/assembly-installing-registry-storage-openshift.adoc @@ -9,6 +9,7 @@ This chapter explains how to install and configure your chosen registry storage .{kafka-streams} storage * xref:installing-kafka-streams-operatorhub[] * xref:setting-up-kafka-streams-storage[] +* xref:registry-kafka-topic-names[] ifdef::apicurio-registry[] .Embedded Infinispan storage @@ -42,6 +43,7 @@ endif::[] //include::{mod-loc}getting-started/proc_installing-registry-kafka-streams-template-storage.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-installing-kafka-streams-operatorhub.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-setting-up-kafka-streams-storage.adoc[leveloffset=+1] 
+include::{mod-loc}getting-started/ref-registry-kafka-topic-names.adoc[leveloffset=+2] include::{mod-loc}getting-started/proc-setting-up-infinispan-storage.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-installing-postgresql-operatorhub.adoc[leveloffset=+1] include::{mod-loc}getting-started/proc-setting-up-postgresql-storage.adoc[leveloffset=+1] diff --git a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc index 28aa050d3f..907587a259 100644 --- a/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc +++ b/docs/modules/ROOT/partials/getting-started/con-registry-serdes-strategy.adoc @@ -69,10 +69,10 @@ custom Java class that implements `io.apicurio.registry.utils.serde.strategy.Glo [discrete] [id='configuring-globalid-strategy-{context}'] -== Configuring the global ID strategy +== Global ID strategy configuration You can configure the following application property for the global ID strategy: * `apicurio.registry.check-period-ms`: Configures the remote schema lookup period in milliseconds You can configure application properties as Java system properties or include them in the Quarkus -application.properties file. For more details, see the https://quarkus.io/guides/config#overriding-properties-at-runtime[Quarkus documentation]. +`application.properties` file. For more details, see the https://quarkus.io/guides/config#overriding-properties-at-runtime[Quarkus documentation]. diff --git a/docs/modules/ROOT/partials/getting-started/proc-installing-kafka-streams-operatorhub.adoc b/docs/modules/ROOT/partials/getting-started/proc-installing-kafka-streams-operatorhub.adoc index 0dff783745..945d7380f1 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-installing-kafka-streams-operatorhub.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-installing-kafka-streams-operatorhub.adoc @@ -23,28 +23,26 @@ endif::[] . 
Change to the OpenShift project in which {registry} is installed. For example, from the *Project* drop-down, select `my-project`. -. In the left navigation menu, click *Catalog* > *OperatorHub*. +. In the left navigation menu, click *Operators* > *OperatorHub*. ifdef::apicurio-registry[] -. In the *Filter by keyword* text box, enter `Strimzi` to find the *{kafka-streams}* Operator. +. In the *Filter by keyword* text box, enter `{kafka-streams}` to find the *{kafka-streams}* Operator. endif::[] ifdef::rh-service-registry[] -. In the *Filter by keyword* text box, enter `AMQ` to find the *Red Hat Integration - {kafka-streams}* Operator. +. In the *Filter by keyword* text box, enter `{kafka-streams}` to find the *Red Hat Integration - {kafka-streams}* Operator. endif::[] -. Read the information about the Operator, and click *Install*. This displays the *Create Operator Subscription* page. +. Read the information about the Operator, and click *Install* to display the Operator subscription page. . Select your subscription settings, for example: -** *Installation Mode* > *A specific namespace on the cluster* > *my-project* ifdef::apicurio-registry[] ** *Update Channel* > *stable* endif::[] ifdef::rh-service-registry[] ** *Update Channel* > *amq-streams-1.5.x* endif::[] +** *Installation Mode* > *A specific namespace on the cluster* > *my-project* ** *Approval Strategy* > *Manual* -. Click *Subscribe*. This displays the *Operators* > *Installed Operators* page. - -. Wait a few moments until the *Status* for the {kafka-streams} Operator displays *Succeeded* and the subscription is *Up to Date*. +. Click *Install*, and wait a few moments until the Operator is ready for use. 
.Additional resources * link:https://docs.openshift.com/container-platform/{registry-ocp-version}/operators/olm-adding-operators-to-cluster.html[Adding Operators to an OpenShift cluster] diff --git a/docs/modules/ROOT/partials/getting-started/proc-installing-postgresql-operatorhub.adoc b/docs/modules/ROOT/partials/getting-started/proc-installing-postgresql-operatorhub.adoc index e4e5b820bd..fbada14de3 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-installing-postgresql-operatorhub.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-installing-postgresql-operatorhub.adoc @@ -18,20 +18,18 @@ If you do not already have a PostgreSQL database Operator installed, you can ins . Change to the OpenShift project in which {registry} is installed. For example, from the *Project* drop-down, select `my-project`. -. In the left navigation menu, click *Catalog* > *OperatorHub*. +. In the left navigation menu, click *Operators* > *OperatorHub*. . In the *Filter by keyword* text box, enter `PostgreSQL` to find an Operator suitable for your environment, for example, *Crunchy PostgreSQL for OpenShift* or *PostgreSQL Operator by Dev4Ddevs.com*. -. Read the information about the Operator, and click *Install*. This displays the *Create Operator Subscription* page. +. Read the information about the Operator, and click *Install* to display the Operator subscription page. . Select your subscription settings, for example: -** *Installation Mode* > *A specific namespace on the cluster* > *my-project* ** *Update Channel* > *stable* +** *Installation Mode* > *A specific namespace on the cluster* > *my-project* ** *Approval Strategy* > *Manual* -. Click *Subscribe*. This displays the *Operators* > *Installed Operators* page. - -. Wait a few moments until the *Status* for the PostgreSQL Operator displays *Succeeded* and the subscription is *Up to Date*. +. Click *Install*, and wait a few moments until the Operator is ready for use. 
+ IMPORTANT: You must read the documentation from your chosen *PostgreSQL* Operator for details on how to create and manage your database. diff --git a/docs/modules/ROOT/partials/getting-started/proc-installing-registry-operatorhub.adoc b/docs/modules/ROOT/partials/getting-started/proc-installing-registry-operatorhub.adoc index 6e0f7f3b39..73c574a2d3 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-installing-registry-operatorhub.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-installing-registry-operatorhub.adoc @@ -21,31 +21,29 @@ You can install the {registry} Operator on your OpenShift cluster from the Opera .. In the left navigation menu, click *Home* > *Project* > *Create Project*. .. Enter a project name, for example, `my-project`, and click *Create*. -. In the left navigation menu, click *Catalog* > *OperatorHub*. +. In the left navigation menu, click *Operators* > *OperatorHub*. ifdef::apicurio-registry[] -. In the *Filter by keyword* text box, enter `Registry` to find the *{registry} Operator*. +. In the *Filter by keyword* text box, enter `registry` to find the *{registry} Operator*. endif::[] ifdef::rh-service-registry[] -. In the *Filter by keyword* text box, enter `Registry` to find the *Red Hat Integration - {registry} Operator*. +. In the *Filter by keyword* text box, enter `registry` to find the *Red Hat Integration - {registry} Operator*. endif::[] -. Read the information about the Operator, and click *Install*. This displays the *Create Operator Subscription* page. +. Read the information about the Operator, and click *Install* to display the Operator subscription page. . Select your subscription settings, for example: -** *Installation Mode* > *A specific namespace on the cluster* > *my-project* ifdef::rh-service-registry[] ** *Update Channel* > Select one of the following channels: -*** *serviceregistry-1*: All minor and patch updates, such as version 1.1.0 and 1.0.1. 
For example, a {registry} installation on version 1.0.x automatically upgrades to 1.1.x releases. -*** *serviceregistry-1.0*: Patch updates only, such as version 1.0.1 and 1.0.2. For example, a {registry} installation on version 1.0.x automatically ignores any 1.1.x releases. +*** *serviceregistry-1*: All minor and patch updates, such as 1.1.0 and 1.0.1. An installation on 1.0.x automatically upgrades to 1.1.x. +*** *serviceregistry-1.0*: Patch updates only, such as 1.0.1 and 1.0.2. An installation on 1.0.x automatically ignores 1.1.x. +*** *serviceregistry-1.1*: Patch updates only, such as 1.1.1 and 1.1.2. An installation on 1.1.x automatically ignores 1.0.x. endif::[] ifdef::apicurio-registry[] ** *Update Channel* > *alpha* endif::[] +** *Installation Mode* > *A specific namespace on the cluster* > *my-project* ** *Approval Strategy* > *Manual* -. Click *Subscribe*. This displays the *Operators* > *Installed Operators* page. - -. Wait a few moments until the *Status* for the {registry} Operator displays *Succeeded* and the subscription is *Up to Date*. - +. Click *Install*, and wait a few moments until the Operator is ready for use. .Additional resources * link:https://docs.openshift.com/container-platform/{registry-ocp-version}/operators/olm-adding-operators-to-cluster.html[Adding Operators to an OpenShift cluster] diff --git a/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc b/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc index 20964cc748..ea8d5c8fc9 100644 --- a/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc +++ b/docs/modules/ROOT/partials/getting-started/proc-setting-up-kafka-streams-storage.adoc @@ -85,29 +85,6 @@ spec: http://example-apicurioregistry.my-project.my-domain-name.com/ ---- -.Overriding default Kafka topic names -You can change the default names of the Kafka topics that {registry} will use to store data in Kafka. 
It is sometimes -necessary to do this when sharing the Kafka cluster with other applications that may already be using topics named -`storage-topic` or `global-id-topic`. - -Change the default topic names by overriding them either by setting appropriate environment variables or by -setting appropriate Java system properties: - -.Environment variables for Kafka topic names -[%header,cols="1,2,2"] -|=== -|Topic default -|Environment variable -|System property -|`storage-topic` -a| `REGISTRY_STREAMS_TOPOLOGY_STORAGE_TOPIC` -a| `registry.streams.topology.storage.topic` -|`global-id-topic` -a| `REGISTRY_STREAMS_TOPOLOGY_GLOBAL_ID_TOPIC` -a| `registry.streams.topology.global.id.topic` -|=== - - .Additional resources ifdef::apicurio-registry[] diff --git a/docs/modules/ROOT/partials/getting-started/ref-registry-kafka-topic-names.adoc b/docs/modules/ROOT/partials/getting-started/ref-registry-kafka-topic-names.adoc new file mode 100644 index 0000000000..6273b7035c --- /dev/null +++ b/docs/modules/ROOT/partials/getting-started/ref-registry-kafka-topic-names.adoc @@ -0,0 +1,23 @@ +// Metadata created by nebel +// ParentAssemblies: assemblies/getting-started/as_installing-the-registry.adoc + +[id="registry-kafka-topic-names"] + += Kafka topic name configuration +The default Kafka topic names that {registry} uses to store data in Kafka are `storage-topic` and `global-id-topic`. You might be required to change these topic names if you are sharing your Kafka cluster with other applications that already use topics named `storage-topic` or `global-id-topic`. 
+ +You can override the default topic names by setting the appropriate environment variables or Java system properties: + +.Kafka topic name configuration +[%header,cols="1,2,2"] +|=== +|Default topic name +|Environment variable +|Java system property +|`storage-topic` +a| `REGISTRY_STREAMS_TOPOLOGY_STORAGE_TOPIC` +a| `registry.streams.topology.storage.topic` +|`global-id-topic` +a| `REGISTRY_STREAMS_TOPOLOGY_GLOBAL_ID_TOPIC` +a| `registry.streams.topology.global.id.topic` +|=== From 728615300f9e513357abdfde4eda8a8140cea627 Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Thu, 17 Dec 2020 06:59:55 -0500 Subject: [PATCH 26/31] Push docker images using "1.3.x-snapshot" tag --- .github/workflows/verify.yaml | 94 +++++++++++++++-------------------- 1 file changed, 40 insertions(+), 54 deletions(-) diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index 9862054cb8..9cbe0921f4 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -40,62 +40,48 @@ jobs: - name: Build All Variants run: mvn clean install -Pprod -Pjpa -Pinfinispan -Pkafka -Pstreams -Pasyncmem -pl !tests - name: Login to DockerHub Registry - if: github.event_name == 'push' && github.ref == 'refs/heads/master' + if: github.event_name == 'push' && github.ref == 'refs/heads/1.3.x' run: echo ${{ secrets.DOCKERHUB_PASSWORD }} | docker login -u ${{ secrets.DOCKERHUB_USERNAME }} --password-stdin - name: Build The Tagged Docker Images - if: github.event_name == 'push' && github.ref == 'refs/heads/master' + if: github.event_name == 'push' && github.ref == 'refs/heads/1.3.x' run: | cd distro/docker - mvn package -Pprod -DskipTests -Ddocker -Ddocker.tag.name=latest-snapshot - mvn package -Pprod -Pjpa -DskipTests -Ddocker -Ddocker.tag.name=latest-snapshot - mvn package -Pprod -Pinfinispan -DskipTests -Ddocker -Ddocker.tag.name=latest-snapshot - mvn package -Pprod -Pkafka -DskipTests -Ddocker -Ddocker.tag.name=latest-snapshot - mvn package -Pprod -Pstreams -DskipTests 
-Ddocker -Ddocker.tag.name=latest-snapshot - mvn package -Pprod -Pasyncmem -DskipTests -Ddocker -Ddocker.tag.name=latest-snapshot - - name: Push The Tagged Docker Images - if: github.event_name == 'push' && github.ref == 'refs/heads/master' + mvn package -Pprod -DskipTests -Ddocker -Ddocker.tag.name=1.3.x-snapshot + mvn package -Pprod -Pjpa -DskipTests -Ddocker -Ddocker.tag.name=1.3.x-snapshot + mvn package -Pprod -Pinfinispan -DskipTests -Ddocker -Ddocker.tag.name=1.3.x-snapshot + mvn package -Pprod -Pkafka -DskipTests -Ddocker -Ddocker.tag.name=1.3.x-snapshot + mvn package -Pprod -Pstreams -DskipTests -Ddocker -Ddocker.tag.name=1.3.x-snapshot + mvn package -Pprod -Pasyncmem -DskipTests -Ddocker -Ddocker.tag.name=1.3.x-snapshot + - name: Push The Tagged Docker Images to Dockerhub + if: github.event_name == 'push' && github.ref == 'refs/heads/1.3.x' run: | - docker push apicurio/apicurio-registry-mem:latest-snapshot - docker push apicurio/apicurio-registry-jpa:latest-snapshot - docker push apicurio/apicurio-registry-infinispan:latest-snapshot - docker push apicurio/apicurio-registry-kafka:latest-snapshot - docker push apicurio/apicurio-registry-streams:latest-snapshot - docker push apicurio/apicurio-registry-asyncmem:latest-snapshot - - kubernetes-tests: - name: Kubernetes Tests - runs-on: ubuntu-18.04 - needs: ["build-verify"] - if: github.repository_owner == 'Apicurio' && github.event_name == 'push' && github.ref == 'refs/heads/master' - steps: - - name: Checkout Code - uses: actions/checkout@v2 - - name: Set up JDK 1.8 - uses: AdoptOpenJDK/install-jdk@v1 - with: - version: '8' - architecture: x64 - - name: Cache Dependencies - uses: actions/cache@v1 - with: - path: ~/.m2/repository - key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }} - restore-keys: | - ${{ runner.os }}-maven- - - - name: Build Converters Distro (used in testsuite) - run: mvn install -pl distro/connect-converter -am -DskipTests -Dmaven.javadoc.skip=true --no-transfer-progress - - - name: 
Kubernetes Tests - run: ./.github/scripts/test_apicurio_kubernetes.sh - - - name: Collect logs - if: failure() - run: ./.github/scripts/collect_kubernetes_logs.sh - - - name: Upload tests logs artifacts - if: failure() - uses: actions/upload-artifact@v1.0.0 - with: - name: tests-logs - path: artifacts + docker push apicurio/apicurio-registry-mem:1.3.x-snapshot + docker push apicurio/apicurio-registry-jpa:1.3.x-snapshot + docker push apicurio/apicurio-registry-infinispan:1.3.x-snapshot + docker push apicurio/apicurio-registry-kafka:1.3.x-snapshot + docker push apicurio/apicurio-registry-streams:1.3.x-snapshot + docker push apicurio/apicurio-registry-asyncmem:1.3.x-snapshot + - name: Login to Quay.io Registry + if: github.event_name == 'push' && github.ref == 'refs/heads/1.3.x' + run: docker login -u "${{ secrets.QUAY_USERNAME }}" -p "${{ secrets.QUAY_PASSWORD }}" quay.io + - name: Tag Docker Images for Quay.io + if: github.event_name == 'push' && github.ref == 'refs/heads/1.3.x' + run: | + docker image tag apicurio/apicurio-registry-mem:1.3.x-snapshot quay.io/apicurio/apicurio-registry-mem:1.3.x-snapshot + docker image tag apicurio/apicurio-registry-sql:1.3.x-snapshot quay.io/apicurio/apicurio-registry-sql:1.3.x-snapshot + docker image tag apicurio/apicurio-registry-infinispan:1.3.x-snapshot quay.io/apicurio/apicurio-registry-infinispan:1.3.x-snapshot + docker image tag apicurio/apicurio-registry-streams:1.3.x-snapshot quay.io/apicurio/apicurio-registry-streams:1.3.x-snapshot + docker image tag apicurio/apicurio-registry-asyncmem:1.3.x-snapshot quay.io/apicurio/apicurio-registry-asyncmem:1.3.x-snapshot + docker image tag apicurio/apicurio-registry-kafkasql:1.3.x-snapshot quay.io/apicurio/apicurio-registry-kafkasql:1.3.x-snapshot + - name: List All The Tagged Docker Images + if: github.event_name == 'push' && github.ref == 'refs/heads/1.3.x' + run: docker images + - name: Push The Tagged Docker Images to Quay.io + if: github.event_name == 'push' && github.ref == 
'refs/heads/1.3.x' + run: | + docker push quay.io/apicurio/apicurio-registry-mem:1.3.x-snapshot + docker push quay.io/apicurio/apicurio-registry-sql:1.3.x-snapshot + docker push quay.io/apicurio/apicurio-registry-infinispan:1.3.x-snapshot + docker push quay.io/apicurio/apicurio-registry-streams:1.3.x-snapshot + docker push quay.io/apicurio/apicurio-registry-asyncmem:1.3.x-snapshot + docker push quay.io/apicurio/apicurio-registry-kafkasql:1.3.x-snapshot From 1e078403e771f1441b3bd6b46f44d72dcbade816 Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Thu, 17 Dec 2020 07:00:34 -0500 Subject: [PATCH 27/31] Update verify.yaml --- .github/workflows/verify.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index 9cbe0921f4..35d57e4dd6 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -7,7 +7,7 @@ on: - 'LICENSE' - 'README*' - 'docs/**' - branches: [master, 1.2.x] + branches: [1.3.x] pull_request: paths-ignore: - '.github/project.yaml' @@ -15,7 +15,7 @@ on: - 'LICENSE' - 'README*' - 'docs/**' - branches: [master] + branches: [1.3.x] jobs: build-verify: From 0d141487f313cfb67a5b5e0f2dd51eed79b3bf47 Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Thu, 17 Dec 2020 07:05:45 -0500 Subject: [PATCH 28/31] Update release.yaml --- .github/workflows/release.yaml | 43 +++++++++++++++++----------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index d64558fc6a..2425507bdb 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -1,7 +1,7 @@ name: Release Workflow on: pull_request: - branches: [master, 1.3.x] + branches: [1.3.x] types: [closed] paths: - '.github/project.yaml' @@ -122,23 +122,22 @@ jobs: - name: Push The Tagged Docker Images run: | cd registry - docker push apicurio/apicurio-registry-mem:latest - docker push 
apicurio/apicurio-registry-mem:latest-release + docker push apicurio/apicurio-registry-mem:1.3.x docker push apicurio/apicurio-registry-mem:${{steps.metadata.outputs.release-version}} - docker push apicurio/apicurio-registry-jpa:latest - docker push apicurio/apicurio-registry-jpa:latest-release + docker push apicurio/apicurio-registry-jpa:1.3.x + docker push apicurio/apicurio-registry-jpa:1.3.x-release docker push apicurio/apicurio-registry-jpa:${{steps.metadata.outputs.release-version}} - docker push apicurio/apicurio-registry-infinispan:latest - docker push apicurio/apicurio-registry-infinispan:latest-release + docker push apicurio/apicurio-registry-infinispan:1.3.x + docker push apicurio/apicurio-registry-infinispan:1.3.x-release docker push apicurio/apicurio-registry-infinispan:${{steps.metadata.outputs.release-version}} - docker push apicurio/apicurio-registry-kafka:latest - docker push apicurio/apicurio-registry-kafka:latest-release + docker push apicurio/apicurio-registry-kafka:1.3.x + docker push apicurio/apicurio-registry-kafka:1.3.x-release docker push apicurio/apicurio-registry-kafka:${{steps.metadata.outputs.release-version}} - docker push apicurio/apicurio-registry-streams:latest - docker push apicurio/apicurio-registry-streams:latest-release + docker push apicurio/apicurio-registry-streams:1.3.x + docker push apicurio/apicurio-registry-streams:1.3.x-release docker push apicurio/apicurio-registry-streams:${{steps.metadata.outputs.release-version}} - docker push apicurio/apicurio-registry-asyncmem:latest - docker push apicurio/apicurio-registry-asyncmem:latest-release + docker push apicurio/apicurio-registry-asyncmem:1.3.x + docker push apicurio/apicurio-registry-asyncmem:1.3.x-release docker push apicurio/apicurio-registry-asyncmem:${{steps.metadata.outputs.release-version}} - name: Maven Deploy run: | @@ -172,12 +171,12 @@ jobs: cd website echo "********** Downloading Latest Release JSON File **********" cd _data/registry - rm latestRelease.json - 
touch latestRelease.json && curl https://api.github.com/repos/apicurio/apicurio-registry/releases/latest > latestRelease.json + rm 1.3.x.json + touch 1.3.x.json && curl https://api.github.com/repos/apicurio/apicurio-registry/releases/latest > 1.3.x.json echo "********** Copying Latest Release JSON File **********" PUBLISHED_AT=$(cat latestRelease.json | jq '.tag_name' | sed 's/"//g') - cp latestRelease.json releases/$PUBLISHED_AT.json + cp 1.3.x.json releases/$PUBLISHED_AT.json - name: Commit Project Website Changes run: | cd website @@ -212,17 +211,17 @@ jobs: - name: Checkout Code uses: actions/checkout@v2 - name: Verify Docker Release For mem - run: ./.github/scripts/verify-docker-release.sh apicurio/apicurio-registry-mem:${{steps.metadata.outputs.release-version}} apicurio/apicurio-registry-mem:latest apicurio/apicurio-registry-mem:latest-release + run: ./.github/scripts/verify-docker-release.sh apicurio/apicurio-registry-mem:${{steps.metadata.outputs.release-version}} apicurio/apicurio-registry-mem:1.3.x - name: Verify Docker Release For asyncmem - run: ./.github/scripts/verify-docker-release.sh apicurio/apicurio-registry-asyncmem:${{steps.metadata.outputs.release-version}} apicurio/apicurio-registry-asyncmem:latest apicurio/apicurio-registry-asyncmem:latest-release + run: ./.github/scripts/verify-docker-release.sh apicurio/apicurio-registry-asyncmem:${{steps.metadata.outputs.release-version}} apicurio/apicurio-registry-asyncmem:1.3.x - name: Verify Docker Release For kafka - run: ./.github/scripts/verify-docker-release.sh apicurio/apicurio-registry-kafka:${{steps.metadata.outputs.release-version}} apicurio/apicurio-registry-kafka:latest apicurio/apicurio-registry-kafka:latest-release + run: ./.github/scripts/verify-docker-release.sh apicurio/apicurio-registry-kafka:${{steps.metadata.outputs.release-version}} apicurio/apicurio-registry-kafka:1.3.x - name: Verify Docker Release For streams - run: ./.github/scripts/verify-docker-release.sh 
apicurio/apicurio-registry-streams:${{steps.metadata.outputs.release-version}} apicurio/apicurio-registry-streams:latest apicurio/apicurio-registry-streams:latest-release + run: ./.github/scripts/verify-docker-release.sh apicurio/apicurio-registry-streams:${{steps.metadata.outputs.release-version}} apicurio/apicurio-registry-streams:1.3.x - name: Verify Docker Release For jpa - run: ./.github/scripts/verify-docker-release.sh apicurio/apicurio-registry-jpa:${{steps.metadata.outputs.release-version}} apicurio/apicurio-registry-jpa:latest apicurio/apicurio-registry-jpa:latest-release + run: ./.github/scripts/verify-docker-release.sh apicurio/apicurio-registry-jpa:${{steps.metadata.outputs.release-version}} apicurio/apicurio-registry-jpa:1.3.x - name: Verify Docker Release For infinispan - run: ./.github/scripts/verify-docker-release.sh apicurio/apicurio-registry-infinispan:${{steps.metadata.outputs.release-version}} apicurio/apicurio-registry-infinispan:latest apicurio/apicurio-registry-infinispan:latest-release + run: ./.github/scripts/verify-docker-release.sh apicurio/apicurio-registry-infinispan:${{steps.metadata.outputs.release-version}} apicurio/apicurio-registry-infinispan:1.3.x # Commented out because artifacts are not immediately available in central # - name: Verify Maven Release # run: | From 9d62ab3ba5db306dac0f536018021d6ce2f70fde Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Thu, 14 Jan 2021 12:25:11 -0500 Subject: [PATCH 29/31] Fixed some merge issues (bad choices when resolving conflicts or missed merge effects). 
--- .github/workflows/release.yaml | 4 ++-- .github/workflows/verify.yaml | 6 +++--- docs/modules/ROOT/assets/attachments/registry-rest-api.htm | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index bb34b34f3a..628f7d3363 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -276,8 +276,8 @@ jobs: cd website echo "********** Downloading Latest Release JSON File **********" cd _data/registry - rm 1.3.x.json - touch 1.3.x.json && curl https://api.github.com/repos/apicurio/apicurio-registry/releases/latest > 1.3.x.json + rm latestRelease.json + touch latestRelease.json && curl https://api.github.com/repos/apicurio/apicurio-registry/releases/latest > latestRelease.json echo "********** Copying Latest Release JSON File **********" PUBLISHED_AT=$(cat latestRelease.json | jq '.tag_name' | sed 's/"//g') diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml index f22457344f..76dce68b51 100644 --- a/.github/workflows/verify.yaml +++ b/.github/workflows/verify.yaml @@ -15,7 +15,7 @@ on: - 'LICENSE' - 'README*' - 'docs/**' - branches: [1.3.x] + branches: [master] jobs: build-verify: @@ -44,11 +44,11 @@ jobs: run: mvn clean install -Pprod -Psql -Pinfinispan -Pstreams -Pasyncmem -Pkafkasql -pl !tests - name: Login to DockerHub Registry - if: github.event_name == 'push' && github.ref == 'refs/heads/1.3.x' + if: github.event_name == 'push' && github.ref == 'refs/heads/master' run: echo ${{ secrets.DOCKERHUB_PASSWORD }} | docker login -u ${{ secrets.DOCKERHUB_USERNAME }} --password-stdin - name: Build The Tagged Docker Images - if: github.event_name == 'push' && github.ref == 'refs/heads/1.3.x' + if: github.event_name == 'push' && github.ref == 'refs/heads/master' run: | cd distro/docker mvn package -Pprod -DskipTests -Ddocker -Ddocker.tag.name=latest-snapshot diff --git a/docs/modules/ROOT/assets/attachments/registry-rest-api.htm 
b/docs/modules/ROOT/assets/attachments/registry-rest-api.htm index 08414ab3b7..b2a41acc77 100644 --- a/docs/modules/ROOT/assets/attachments/registry-rest-api.htm +++ b/docs/modules/ROOT/assets/attachments/registry-rest-api.htm @@ -16,7 +16,7 @@ - + From 849b2badeb35e70e20bb0564bd18faad681d46ab Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Fri, 15 Jan 2021 13:38:53 -0500 Subject: [PATCH 30/31] Retry the schema fetch in the Kafka serializer to account for async storages --- .../utils/serde/AbstractKafkaSerializer.java | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AbstractKafkaSerializer.java b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AbstractKafkaSerializer.java index c33675b3ca..dcb7195389 100644 --- a/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AbstractKafkaSerializer.java +++ b/utils/serde/src/main/java/io/apicurio/registry/utils/serde/AbstractKafkaSerializer.java @@ -22,6 +22,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.io.UncheckedIOException; +import java.util.concurrent.Callable; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.serialization.Serializer; @@ -91,7 +92,11 @@ public byte[] serialize(String topic, Headers headers, U data) { T schema = toSchema(data); String artifactId = getArtifactIdStrategy().artifactId(topic, isKey(), schema); long id = getGlobalIdStrategy().findId(getClient(), artifactId, artifactType(), schema); - schema = getCache().getSchema(id); // use registry's schema! + // Note: we need to retry this fetch to account for the possibility that the GlobalId strategy just added + // the schema to the registry but the registry is not yet ready to serve it. This is due to some registry + // storages being asynchronous. 
This is a temporary fix - a better approach would be for the GlobalId + // strategy to seed the cache with the schema only in the case where the strategy uploaded the schema to the registry. + schema = retry(() -> getCache().getSchema(id), 5); // use registry's schema! ByteArrayOutputStream out = new ByteArrayOutputStream(); if (headerUtils != null) { headerUtils.addSchemaHeaders(headers, artifactId, id); @@ -107,4 +112,22 @@ public byte[] serialize(String topic, Headers headers, U data) { } } + protected T retry(Callable callable, int maxRetries) throws RuntimeException { + int iteration = 0; + + RuntimeException error = null; + while (iteration++ <= maxRetries) { + try { + return callable.call(); + } catch (RuntimeException e) { + error = e; + } catch (Exception e) { + error = new RuntimeException(e); + } + // Sleep before the next iteration. + try { Thread.sleep(500 * iteration); } catch (InterruptedException e) { } + } + throw error; + } + } From d5f8a9227a127e354489e96f4411f363f4b244c7 Mon Sep 17 00:00:00 2001 From: Eric Wittmann Date: Fri, 15 Jan 2021 14:14:10 -0500 Subject: [PATCH 31/31] Fix for the check period test to support async storages --- .../test/java/io/apicurio/registry/RegistrySerdeTest.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/app/src/test/java/io/apicurio/registry/RegistrySerdeTest.java b/app/src/test/java/io/apicurio/registry/RegistrySerdeTest.java index 0ff36e90e2..9da739710a 100644 --- a/app/src/test/java/io/apicurio/registry/RegistrySerdeTest.java +++ b/app/src/test/java/io/apicurio/registry/RegistrySerdeTest.java @@ -132,7 +132,8 @@ public void testCheckPeriod() throws Exception { Schema schema = new Schema.Parser().parse("{\"type\":\"record\",\"name\":\"myrecord5x\",\"fields\":[{\"name\":\"bar\",\"type\":\"string\"}]}"); String artifactId = generateArtifactId(); byte[] schemaContent = IoUtil.toBytes(schema.toString()); - restClient.createArtifact(artifactId, ArtifactType.AVRO, new 
ByteArrayInputStream(schemaContent)); + ArtifactMetaData amd = restClient.createArtifact(artifactId, ArtifactType.AVRO, new ByteArrayInputStream(schemaContent)); + this.waitForGlobalId(amd.getGlobalId()); long pc = 5000L; // 5seconds check period ... @@ -146,7 +147,8 @@ public void testCheckPeriod() throws Exception { Assertions.assertEquals(id1, id2); // should be less than 5seconds ... retry(() -> restClient.getArtifactMetaDataByGlobalId(id2)); - restClient.updateArtifact(artifactId, ArtifactType.AVRO, new ByteArrayInputStream(schemaContent)); + ArtifactMetaData amd_v2 = restClient.updateArtifact(artifactId, ArtifactType.AVRO, new ByteArrayInputStream(schemaContent)); + this.waitForGlobalId(amd_v2.getGlobalId()); Thread.sleep(pc + 1); retry(() -> Assertions.assertNotEquals(id2, restClient.getArtifactMetaData(artifactId).getGlobalId()));