Skip to content

Commit

Permalink
Merge pull request #26548 from gsmet/2.10.2-backports-1
Browse files Browse the repository at this point in the history
2.10.2 backports 1
  • Loading branch information
gsmet authored Jul 4, 2022
2 parents d5b047b + 4853f19 commit 3f61ae9
Show file tree
Hide file tree
Showing 58 changed files with 644 additions and 212 deletions.
12 changes: 6 additions & 6 deletions bom/application/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@
<microprofile-rest-client.version>2.0</microprofile-rest-client.version>
<microprofile-jwt.version>1.2</microprofile-jwt.version>
<microprofile-lra.version>1.0</microprofile-lra.version>
<smallrye-common.version>1.12.0</smallrye-common.version>
<smallrye-common.version>1.13.0</smallrye-common.version>
<smallrye-config.version>2.10.1</smallrye-config.version>
<smallrye-health.version>3.2.1</smallrye-health.version>
<smallrye-metrics.version>3.0.5</smallrye-metrics.version>
Expand Down Expand Up @@ -90,7 +90,7 @@
<classmate.version>1.5.1</classmate.version>
<hibernate-orm.version>5.6.9.Final</hibernate-orm.version> <!-- When updating, align bytebuddy.version to Hibernate needs as well (just below): -->
<bytebuddy.version>1.12.9</bytebuddy.version> <!-- Version controlled by Hibernate ORM's needs -->
<hibernate-reactive.version>1.1.6.Final</hibernate-reactive.version>
<hibernate-reactive.version>1.1.7.Final</hibernate-reactive.version>
<hibernate-validator.version>6.2.3.Final</hibernate-validator.version>
<hibernate-search.version>6.1.5.Final</hibernate-search.version>
<narayana.version>5.12.6.Final</narayana.version>
Expand All @@ -117,7 +117,7 @@
<quartz.version>2.3.2</quartz.version>
<h2.version>2.1.210</h2.version>
<postgresql-jdbc.version>42.3.6</postgresql-jdbc.version>
<mariadb-jdbc.version>3.0.5</mariadb-jdbc.version>
<mariadb-jdbc.version>3.0.6</mariadb-jdbc.version>
<mysql-jdbc.version>8.0.29</mysql-jdbc.version>
<mssql-jdbc.version>7.2.2.jre8</mssql-jdbc.version>
<adal4j.version>1.6.7</adal4j.version>
Expand Down Expand Up @@ -189,10 +189,10 @@
<log4j2-api.version>2.17.2</log4j2-api.version>
<log4j-jboss-logmanager.version>1.3.0.Final</log4j-jboss-logmanager.version>
<avro.version>1.11.0</avro.version>
<apicurio-registry.version>2.2.3.Final</apicurio-registry.version>
<apicurio-common-rest-client.version>0.1.9.Final</apicurio-common-rest-client.version> <!-- must be the version Apicurio Registry uses -->
<apicurio-registry.version>2.2.4.Final</apicurio-registry.version>
<apicurio-common-rest-client.version>0.1.11.Final</apicurio-common-rest-client.version> <!-- must be the version Apicurio Registry uses -->
<jacoco.version>0.8.8</jacoco.version>
<testcontainers.version>1.17.2</testcontainers.version> <!-- Make sure to also update docker-java.version to match its needs -->
<testcontainers.version>1.17.3</testcontainers.version> <!-- Make sure to also update docker-java.version to match its needs -->
<docker-java.version>3.2.13</docker-java.version> <!-- must be the version Testcontainers use -->
<aesh-readline.version>2.2</aesh-readline.version>
<aesh.version>2.6</aesh.version>
Expand Down
2 changes: 1 addition & 1 deletion devtools/cli/distribution/release-cli.sh
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ export JRELEASER_PROJECT_VERSION=${VERSION}
export JRELEASER_BRANCH=${BRANCH}
export JRELEASER_CHOCOLATEY_GITHUB_BRANCH=${BRANCH}

jbang org.jreleaser:jreleaser:1.0.0 full-release \
jbang org.jreleaser:jreleaser:1.1.0 full-release \
--git-root-search \
-od target

Expand Down
2 changes: 1 addition & 1 deletion devtools/gradle/build.gradle
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
plugins {
id 'com.gradle.plugin-publish' version '0.21.0' apply false
id 'com.gradle.plugin-publish' version '1.0.0' apply false
}

subprojects {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
import org.gradle.api.internal.artifacts.dependencies.DefaultDependencyArtifact;
import org.gradle.api.internal.artifacts.dependencies.DefaultExternalModuleDependency;
import org.gradle.api.plugins.JavaPlugin;
import org.gradle.api.provider.ListProperty;

import io.quarkus.bootstrap.BootstrapConstants;
import io.quarkus.bootstrap.model.PlatformImports;
Expand Down Expand Up @@ -135,13 +136,15 @@ private void setUpPlatformConfiguration() {

project.getConfigurations().create(this.platformConfigurationName, configuration -> {
// Platform configuration is just implementation, filtered to platform dependencies
configuration.getDependencies().addAllLater(project.provider(() -> project.getConfigurations()
.getByName(JavaPlugin.IMPLEMENTATION_CONFIGURATION_NAME)
.getAllDependencies()
.stream()
.filter(dependency -> dependency instanceof ModuleDependency &&
ToolingUtils.isEnforcedPlatform((ModuleDependency) dependency))
.collect(Collectors.toList())));
ListProperty<Dependency> dependencyListProperty = project.getObjects().listProperty(Dependency.class);
configuration.getDependencies()
.addAllLater(dependencyListProperty.value(project.provider(() -> project.getConfigurations()
.getByName(JavaPlugin.IMPLEMENTATION_CONFIGURATION_NAME)
.getAllDependencies()
.stream()
.filter(dependency -> dependency instanceof ModuleDependency &&
ToolingUtils.isEnforcedPlatform((ModuleDependency) dependency))
.collect(Collectors.toList()))));
// Configures PlatformImportsImpl once the platform configuration is resolved
configuration.getResolutionStrategy().eachDependency(d -> {
ModuleIdentifier identifier = d.getTarget().getModule();
Expand Down Expand Up @@ -192,7 +195,8 @@ private void setUpDeploymentConfiguration() {
project.getConfigurations().create(this.deploymentConfigurationName, configuration -> {
Configuration enforcedPlatforms = this.getPlatformConfiguration();
configuration.extendsFrom(enforcedPlatforms);
configuration.getDependencies().addAllLater(project.provider(() -> {
ListProperty<Dependency> dependencyListProperty = project.getObjects().listProperty(Dependency.class);
configuration.getDependencies().addAllLater(dependencyListProperty.value(project.provider(() -> {
ConditionalDependenciesEnabler cdEnabler = new ConditionalDependenciesEnabler(project, mode,
enforcedPlatforms);
final Collection<ExtensionDependency> allExtensions = cdEnabler.getAllExtensions();
Expand Down Expand Up @@ -225,7 +229,7 @@ private void setUpDeploymentConfiguration() {
}
}
return deploymentDependencies;
}));
})));
});
}
}
Expand Down
2 changes: 1 addition & 1 deletion docs/src/main/asciidoc/mongodb-panache-kotlin.adoc
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ personRepository.deleteAll()
val deleted = personRepository.deleteById(personId)
// set the name of all living persons to 'Mortal'
personRepository.update("name = 'Mortal' where status = ?1", Status.Alive)
var updated = personRepository.update("name", "Mortal").where("status", Status.Alive)
----

Expand Down
12 changes: 6 additions & 6 deletions docs/src/main/asciidoc/mongodb-panache.adoc
Original file line number Diff line number Diff line change
Expand Up @@ -583,16 +583,16 @@ The MongoDB POJO codec doesn't support `ZonedDateTime` and `OffsetDateTime` so y
MongoDB with Panache also supports extended MongoDB queries by providing a `Document` query, this is supported by the find/list/stream/count/delete/update methods.

MongoDB with Panache offers operations to update multiple documents based on an update document and a query:
`Person.update("foo = ?1, bar = ?2", fooName, barName).where("name = ?1", name)`.
`Person.update("foo = ?1 and bar = ?2", fooName, barName).where("name = ?1", name)`.

For these operations, you can express the update document the same way you express your queries, here are some examples:

- `<singlePropertyName>` (and single parameter) which will expand to the update document `{'$set' : {'singleColumnName': '?1'}}`
- `firstname = ?1, status = ?2` will be mapped to the update document `{'$set' : {'firstname': ?1, 'status': ?2}}`
- `firstname = :firstname, status = :status` will be mapped to the update document `{'$set' : {'firstname': :firstname, 'status': :status}}`
- `{'firstname' : ?1, 'status' : ?2}` will be mapped to the update document `{'$set' : {'firstname': ?1, 'status': ?2}}`
- `{'firstname' : firstname, 'status' : :status}` ` will be mapped to the update document `{'$set' : {'firstname': :firstname, 'status': :status}}`
- `{'$inc': {'cpt': ?1}}` will be use as-is
- `firstname = ?1 and status = ?2` will be mapped to the update document `{'$set' : {'firstname': ?1, 'status': ?2}}`
- `firstname = :firstname and status = :status` will be mapped to the update document `{'$set' : {'firstname': :firstname, 'status': :status}}`
- `{'firstname' : ?1 and 'status' : ?2}` will be mapped to the update document `{'$set' : {'firstname': ?1, 'status': ?2}}`
- `{'firstname' : :firstname and 'status' : :status}` will be mapped to the update document `{'$set' : {'firstname': :firstname, 'status': :status}}`
- `{'$inc': {'cpt': ?1}}` will be used as-is

=== Query parameters

Expand Down
13 changes: 3 additions & 10 deletions docs/src/main/asciidoc/stork-kubernetes.adoc
Original file line number Diff line number Diff line change
Expand Up @@ -3,21 +3,14 @@ This guide is maintained in the main Quarkus repository
and pull requests should be submitted there:
https://github.com/quarkusio/quarkus/tree/main/docs/src/main/asciidoc
////
= Getting Started with SmallRye Stork
= Using Stork with Kubernetes
:extension-status: preview

include::./attributes.adoc[]

The essence of distributed systems resides in the interaction between services.
In modern architecture, you often have multiple instances of your service to share the load or improve the resilience by redundancy.
But how do you select the best instance of your service?
That's where https://smallrye.io/smallrye-stork[SmallRye Stork] helps.
Stork is going to choose the most appropriate instance.
It offers:
This guide explains how to use Stork with Kubernetes for service discovery and load balancing.

* Extensible service discovery mechanisms
* Built-in support for Consul and Kubernetes
* Customizable client load-balancing strategies
If you are new to Stork, please read the xref:stork.adoc[Stork Getting Started Guide].

include::{includes}/extension-status.adoc[]

Expand Down
5 changes: 5 additions & 0 deletions docs/src/main/asciidoc/stork-reference.adoc
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,15 @@ and pull requests should be submitted there:
https://github.com/quarkusio/quarkus/tree/main/docs/src/main/asciidoc
////
= Stork Reference Guide
:extension-status: preview

include::./attributes.adoc[]

This guide is the companion to the xref:stork.adoc[Stork Getting Started Guide].
It explains the configuration and usage of SmallRye Stork integration in Quarkus.

include::{includes}/extension-status.adoc[]

== Supported clients

The current integration of Stork supports:
Expand Down Expand Up @@ -56,6 +59,8 @@ quarkus.stork.my-service.service-discovery.k8s-namespace=my-namespace
Stork looks for the Kubernetes Service with the given name (`my-service` in the previous example) in the specified namespace.
Instead of using the Kubernetes Service IP directly and letting Kubernetes handle the selection and balancing, Stork inspects the service and retrieves the list of pods providing the service. Then, it can select the instance.

For a full example of using Stork with Kubernetes, please read the xref:stork-kubernetes.adoc[Using Stork with Kubernetes guide].

== Implementing a custom service discovery

Stork is extensible, and you can implement your own service discovery mechanism.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package io.quarkus.agroal.runtime;

import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalInt;
Expand Down Expand Up @@ -130,12 +131,12 @@ public class DataSourceJdbcRuntimeConfig {
* validation. Setting this setting to STRICT may lead to failures in those cases.
*/
@ConfigItem
public Optional<AgroalConnectionPoolConfiguration.TransactionRequirement> transactionRequirement;
public Optional<AgroalConnectionPoolConfiguration.TransactionRequirement> transactionRequirement = Optional.empty();

/**
* Other unspecified properties to be passed to the JDBC driver when creating new connections.
*/
@ConfigItem
public Map<String, String> additionalJdbcProperties;
public Map<String, String> additionalJdbcProperties = Collections.emptyMap();

}
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ public void handleHttpRequests(RoutingContext ctx) {
event.setRequestContext(new APIGatewayV2HTTPEvent.RequestContext());
event.getRequestContext().setHttp(new APIGatewayV2HTTPEvent.RequestContext.Http());
event.getRequestContext().getHttp().setMethod(ctx.request().method().name());
event.getRequestContext().getHttp().setSourceIp(ctx.request().connection().remoteAddress().hostAddress());
event.setRawPath(ctx.request().path());
event.setRawQueryString(ctx.request().query());
for (String header : ctx.request().headers().names()) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@

import java.io.ByteArrayOutputStream;
import java.net.InetSocketAddress;
import java.net.URL;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;
import java.nio.charset.StandardCharsets;
Expand Down Expand Up @@ -36,7 +35,6 @@
import io.netty.handler.codec.http.HttpVersion;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.util.ReferenceCountUtil;
import io.quarkus.amazon.lambda.runtime.AmazonLambdaContext;
import io.quarkus.netty.runtime.virtual.VirtualClientConnection;
import io.quarkus.netty.runtime.virtual.VirtualResponseHandler;
import io.quarkus.vertx.http.runtime.QuarkusHttpHeaders;
Expand Down Expand Up @@ -67,7 +65,7 @@ public APIGatewayV2HTTPResponse handleRequest(APIGatewayV2HTTPEvent request, Con
}

try {
return nettyDispatch(clientAddress, request, (AmazonLambdaContext) context);
return nettyDispatch(clientAddress, request, context);
} catch (Exception e) {
log.error("Request Failure", e);
APIGatewayV2HTTPResponse res = new APIGatewayV2HTTPResponse();
Expand Down Expand Up @@ -168,7 +166,7 @@ public void close() {
}

private APIGatewayV2HTTPResponse nettyDispatch(InetSocketAddress clientAddress, APIGatewayV2HTTPEvent request,
AmazonLambdaContext context)
Context context)
throws Exception {
QuarkusHttpHeaders quarkusHeaders = new QuarkusHttpHeaders();
quarkusHeaders.setContextObject(Context.class, context);
Expand Down Expand Up @@ -221,12 +219,19 @@ httpMethod, ofNullable(request.getRawQueryString())
NettyResponseHandler handler = new NettyResponseHandler(request);
VirtualClientConnection connection = VirtualClientConnection.connect(handler, VertxHttpRecorder.VIRTUAL_HTTP,
clientAddress);
if (connection.peer().remoteAddress().equals(VertxHttpRecorder.VIRTUAL_HTTP)) {
URL requestURL = context.getRequestURL();
if (request.getRequestContext() != null
&& request.getRequestContext().getHttp() != null
&& request.getRequestContext().getHttp().getSourceIp() != null
&& request.getRequestContext().getHttp().getSourceIp().length() > 0) {
int port = 443; // todo, may be bad to assume 443?
if (request.getHeaders() != null &&
request.getHeaders().get("X-Forwarded-Port") != null) {
port = Integer.parseInt(request.getHeaders().get("X-Forwarded-Port"));
}
connection.peer().attr(ConnectionBase.REMOTE_ADDRESS_OVERRIDE).set(
SocketAddress.inetSocketAddress(requestURL.getPort(), requestURL.getHost()));
SocketAddress.inetSocketAddress(port,
request.getRequestContext().getHttp().getSourceIp()));
}

connection.sendMessage(nettyRequest);
connection.sendMessage(requestContent);
try {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.ObjectWriter;

import io.quarkus.amazon.lambda.http.model.ApiGatewayRequestIdentity;
import io.quarkus.amazon.lambda.http.model.AwsProxyRequest;
import io.quarkus.amazon.lambda.http.model.AwsProxyRequestContext;
import io.quarkus.amazon.lambda.http.model.AwsProxyResponse;
Expand Down Expand Up @@ -61,6 +62,8 @@ public void handleHttpRequests(RoutingContext ctx) {
event.setRequestContext(new AwsProxyRequestContext());
event.getRequestContext().setRequestId(requestId);
event.getRequestContext().setHttpMethod(ctx.request().method().name());
event.getRequestContext().setIdentity(new ApiGatewayRequestIdentity());
event.getRequestContext().getIdentity().setSourceIp(ctx.request().connection().remoteAddress().hostAddress());
event.setHttpMethod(ctx.request().method().name());
event.setPath(ctx.request().path());
if (ctx.request().query() != null) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@

import java.io.ByteArrayOutputStream;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.channels.Channels;
import java.nio.channels.WritableByteChannel;
Expand Down Expand Up @@ -34,7 +33,6 @@
import io.quarkus.amazon.lambda.http.model.AwsProxyRequestContext;
import io.quarkus.amazon.lambda.http.model.AwsProxyResponse;
import io.quarkus.amazon.lambda.http.model.Headers;
import io.quarkus.amazon.lambda.runtime.AmazonLambdaContext;
import io.quarkus.netty.runtime.virtual.VirtualClientConnection;
import io.quarkus.netty.runtime.virtual.VirtualResponseHandler;
import io.quarkus.vertx.http.runtime.QuarkusHttpHeaders;
Expand Down Expand Up @@ -62,7 +60,7 @@ public AwsProxyResponse handleRequest(AwsProxyRequest request, Context context)
}

try {
return nettyDispatch(clientAddress, request, (AmazonLambdaContext) context);
return nettyDispatch(clientAddress, request, context);
} catch (Exception e) {
log.error("Request Failure", e);
return new AwsProxyResponse(500, errorHeaders, "{ \"message\": \"Internal Server Error\" }");
Expand Down Expand Up @@ -152,7 +150,7 @@ public void close() {
}

private AwsProxyResponse nettyDispatch(InetSocketAddress clientAddress, AwsProxyRequest request,
AmazonLambdaContext context)
Context context)
throws Exception {
String path = request.getPath();
//log.info("---- Got lambda request: " + path);
Expand Down Expand Up @@ -208,10 +206,17 @@ private AwsProxyResponse nettyDispatch(InetSocketAddress clientAddress, AwsProxy
NettyResponseHandler handler = new NettyResponseHandler(request);
VirtualClientConnection connection = VirtualClientConnection.connect(handler, VertxHttpRecorder.VIRTUAL_HTTP,
clientAddress);
if (connection.peer().remoteAddress().equals(VertxHttpRecorder.VIRTUAL_HTTP)) {
URL requestURL = context.getRequestURL();
if (request.getRequestContext() != null
&& request.getRequestContext().getIdentity() != null
&& request.getRequestContext().getIdentity().getSourceIp() != null
&& request.getRequestContext().getIdentity().getSourceIp().length() > 0) {
int port = 443; // todo, may be bad to assume 443?
if (request.getMultiValueHeaders() != null &&
request.getMultiValueHeaders().getFirst("X-Forwarded-Port") != null) {
port = Integer.parseInt(request.getMultiValueHeaders().getFirst("X-Forwarded-Port"));
}
connection.peer().attr(ConnectionBase.REMOTE_ADDRESS_OVERRIDE).set(
SocketAddress.inetSocketAddress(requestURL.getPort(), requestURL.getHost()));
SocketAddress.inetSocketAddress(port, request.getRequestContext().getIdentity().getSourceIp()));
}

connection.sendMessage(nettyRequest);
Expand Down
Loading

0 comments on commit 3f61ae9

Please sign in to comment.