diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 3853feac8d0d6..0b3d0bc09e232 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -103,6 +103,8 @@ dependencies { api 'com.avast.gradle:gradle-docker-compose-plugin:0.13.4' api 'org.apache.maven:maven-model:3.6.2' api 'com.networknt:json-schema-validator:1.0.36' + api 'org.ow2.asm:asm:9.0' + api 'org.ow2.asm:asm-tree:9.0' api "org.apache.httpcomponents:httpclient:${props.getProperty('httpclient')}" api "org.apache.httpcomponents:httpcore:${props.getProperty('httpcore')}" compileOnly "com.puppycrawl.tools:checkstyle:${props.getProperty('checkstyle')}" diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/JavaClassPublicifier.java b/buildSrc/src/main/java/org/elasticsearch/gradle/JavaClassPublicifier.java new file mode 100644 index 0000000000000..1af3981b983b9 --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/JavaClassPublicifier.java @@ -0,0 +1,124 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gradle; + +import org.gradle.api.DefaultTask; +import org.gradle.api.file.DirectoryProperty; +import org.gradle.api.tasks.Input; +import org.gradle.api.tasks.InputDirectory; +import org.gradle.api.tasks.OutputDirectory; +import org.gradle.api.tasks.TaskAction; +import org.objectweb.asm.ClassReader; +import org.objectweb.asm.ClassWriter; +import org.objectweb.asm.tree.ClassNode; +import org.objectweb.asm.tree.InnerClassNode; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.util.List; +import java.util.function.Consumer; + +import static org.objectweb.asm.Opcodes.ACC_PRIVATE; +import static org.objectweb.asm.Opcodes.ACC_PUBLIC; + +/** + * A task to manipulate an existing class file. + */ +public class JavaClassPublicifier extends DefaultTask { + + private List classFiles; + private DirectoryProperty inputDir; + private DirectoryProperty outputDir; + + public JavaClassPublicifier() { + this.inputDir = getProject().getObjects().directoryProperty(); + this.outputDir = getProject().getObjects().directoryProperty(); + } + + @Input + public List getClassFiles() { + return classFiles; + } + + public void setClassFiles(List classFiles) { + this.classFiles = classFiles; + } + + @InputDirectory + public DirectoryProperty getInputDir() { + return inputDir; + } + + @OutputDirectory + public DirectoryProperty getOutputDir() { + return outputDir; + } + + @TaskAction + public void adapt() throws IOException { + + for (String classFile : classFiles) { + adjustClass(classFile, classNode -> { + classNode.access &= ~ACC_PRIVATE; + classNode.access |= ACC_PUBLIC; + + if (classFile.contains("$")) { + // java inexplicably has an inner class contain itself as an inner class... 
+ makeInnerClassPublic(classNode, classNode.name.split("\\$")[1]); + } + }); + + if (classFile.contains("$")) { + // for inner classes, also need to adjust the parent + String[] parts = classFile.split("\\$"); + String parentClassFile = parts[0] + ".class"; + String innerClass = parts[1].split("\\.")[0]; + adjustClass(parentClassFile, classNode -> makeInnerClassPublic(classNode, innerClass)); + } + } + } + + private static void makeInnerClassPublic(ClassNode classNode, String innerClass) { + InnerClassNode innerClassNode = classNode.innerClasses.stream().filter(node -> node.innerName.equals(innerClass)).findFirst().get(); + innerClassNode.access &= ~ACC_PRIVATE; + innerClassNode.access |= ACC_PUBLIC; + } + + private void writeClass(String classFile, ClassNode classNode) throws IOException { + ClassWriter classWriter = new ClassWriter(ClassWriter.COMPUTE_FRAMES); + classNode.accept(classWriter); + + File outputFile = outputDir.get().file(classFile).getAsFile(); + outputFile.getParentFile().mkdirs(); + Files.write(outputFile.toPath(), classWriter.toByteArray()); + } + + private void adjustClass(String classFile, Consumer adjustor) throws IOException { + try (InputStream is = Files.newInputStream(inputDir.get().file(classFile).getAsFile().toPath())) { + ClassReader classReader = new ClassReader(is); + ClassNode classNode = new ClassNode(); + classReader.accept(classNode, ClassReader.EXPAND_FRAMES); + adjustor.accept(classNode); + writeClass(classFile, classNode); + } + } +} diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LicenseAnalyzer.java b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LicenseAnalyzer.java index 60d9db449959d..45385067ce01c 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LicenseAnalyzer.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/precommit/LicenseAnalyzer.java @@ -134,10 +134,33 @@ public class LicenseAnalyzer { Pattern.DOTALL ) ), + new LicenseMatcher( + "MIT-0", + true, + 
false, + Pattern.compile( + ("MIT No Attribution\n" + + "Copyright .+\n" + + "\n" + + "Permission is hereby granted, free of charge, to any person obtaining a copy of " + + "this software and associated documentation files \\(the \"Software\"\\), to deal in the Software without " + + "restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, " + + "and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so.\n" + + "\n" + + "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, " + + "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND " + + "NONINFRINGEMENT\\. IN NO EVENT SHALL THE AUTHORS OR " + + "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR " + + "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n") + .replaceAll("\\s+", "\\\\s*"), + Pattern.DOTALL + ) + ), new LicenseMatcher("MPL-1.1", true, false, Pattern.compile("Mozilla Public License.*Version 1.1", Pattern.DOTALL)), new LicenseMatcher("MPL-2.0", true, false, Pattern.compile("Mozilla\\s*Public\\s*License\\s*Version\\s*2\\.0", Pattern.DOTALL)), new LicenseMatcher("XZ", false, false, Pattern.compile("Licensing of XZ for Java", Pattern.DOTALL)), new LicenseMatcher("EPL-2.0", true, false, Pattern.compile("Eclipse Public License - v 2.0", Pattern.DOTALL)), + new LicenseMatcher("EDL-1.0", true, false, Pattern.compile("Eclipse Distribution License - v 1.0", Pattern.DOTALL)), new LicenseMatcher("LGPL-2.1", true, true, Pattern.compile("GNU LESSER GENERAL PUBLIC LICENSE.*Version 2.1", Pattern.DOTALL)), new LicenseMatcher("LGPL-3.0", true, true, Pattern.compile("GNU LESSER GENERAL PUBLIC LICENSE.*Version 3", Pattern.DOTALL)) }; diff --git a/plugins/repository-azure/azure-storage-blob/build.gradle 
b/plugins/repository-azure/azure-storage-blob/build.gradle new file mode 100644 index 0000000000000..1ddf03ca8ab82 --- /dev/null +++ b/plugins/repository-azure/azure-storage-blob/build.gradle @@ -0,0 +1,48 @@ +import org.elasticsearch.gradle.JavaClassPublicifier; + +apply plugin: 'elasticsearch.java' +apply plugin: 'com.github.johnrengelman.shadow' + +configurations { + originalJar { + transitive = false + } +} + +dependencies { + originalJar "com.azure:azure-storage-blob:${project.parent.versions.azure}" + implementation "com.azure:azure-storage-blob:${project.parent.versions.azure}" +} + +// We have to rewrite the service classes to make them public to avoid +// granting the permission "java.lang.reflect.ReflectPermission" "newProxyInPackage" +// to this plugin. +// +// There are plans to make those public in the azure sdk side, but in the meanwhile +// we just do this workaround +// https://github.com/Azure/azure-sdk-for-java/issues/12829#issuecomment-736755543 +List classesToRewrite = ['com/azure/storage/blob/implementation/AppendBlobsImpl$AppendBlobsService.class', + 'com/azure/storage/blob/implementation/BlobsImpl$BlobsService.class', + 'com/azure/storage/blob/implementation/BlockBlobsImpl$BlockBlobsService.class', + 'com/azure/storage/blob/implementation/ContainersImpl$ContainersService.class', + 'com/azure/storage/blob/implementation/DirectorysImpl$DirectorysService.class', + 'com/azure/storage/blob/implementation/PageBlobsImpl$PageBlobsService.class', + 'com/azure/storage/blob/implementation/ServicesImpl$ServicesService.class'] + +tasks.create('extractClientClasses', Copy).configure { + from({ zipTree(configurations.originalJar.singleFile) }) { + include "com/azure/storage/blob/implementation/**" + } + into project.file('build/original') +} + +def modifiedOutput = project.layout.buildDirectory.dir('modified') +def makePublic = tasks.create('makeClientClassesPublic', JavaClassPublicifier) +makePublic.configure { + dependsOn 'extractClientClasses' + 
classFiles = classesToRewrite + inputDir = project.layout.buildDirectory.dir('original') + outputDir = modifiedOutput +} + +sourceSets.main.output.dir(modifiedOutput, builtBy: makePublic) diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 6a054e4aab317..20752e045e489 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -31,11 +31,61 @@ esplugin { classname 'org.elasticsearch.repositories.azure.AzureRepositoryPlugin' } +versions << [ + 'azure': '12.9.0', + 'azureCore': '1.10.0', + 'azureCoreHttpNetty': '1.6.3', + + 'jakartaActivation': '1.2.1', + 'jakartaXMLBind': '2.3.2', + 'stax2API': '4.2', + 'woodstox': '6.0.2', + + 'reactorNetty': '0.9.12.RELEASE', + 'reactorCore': '3.3.10.RELEASE', + 'reactiveStreams': '1.0.3', +] + dependencies { - api 'com.microsoft.azure:azure-storage:8.6.2' - api 'com.microsoft.azure:azure-keyvault-core:1.0.0' - runtimeOnly 'com.google.guava:guava:20.0' - api 'org.apache.commons:commons-lang3:3.4' + api project(path: 'azure-storage-blob', configuration: 'shadow') + api "com.azure:azure-storage-common:${versions.azure}" + api "com.azure:azure-core-http-netty:${versions.azureCoreHttpNetty}" + api "com.azure:azure-core:${versions.azureCore}" + + // jackson + api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}" + + // jackson xml + api "com.fasterxml.jackson.dataformat:jackson-dataformat-xml:${versions.jackson}" + api "com.fasterxml.jackson.datatype:jackson-datatype-jsr310:${versions.jackson}" + api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" + api "jakarta.activation:jakarta.activation-api:${versions.jakartaActivation}" + api 
"jakarta.xml.bind:jakarta.xml.bind-api:${versions.jakartaXMLBind}" + api "org.codehaus.woodstox:stax2-api:${versions.stax2API}" + api "com.fasterxml.woodstox:woodstox-core:${versions.woodstox}" + + // netty + api "io.netty:netty-buffer:${versions.netty}" + api "io.netty:netty-codec-http:${versions.netty}" + api "io.netty:netty-codec-http2:${versions.netty}" + api "io.netty:netty-codec-socks:${versions.netty}" + api "io.netty:netty-codec:${versions.netty}" + api "io.netty:netty-common:${versions.netty}" + api "io.netty:netty-handler-proxy:${versions.netty}" + api "io.netty:netty-handler:${versions.netty}" + api "io.netty:netty-resolver:${versions.netty}" + api "io.netty:netty-transport:${versions.netty}" + + // reactor + api "io.projectreactor.netty:reactor-netty:${versions.reactorNetty}" + api "io.projectreactor:reactor-core:${versions.reactorCore}" + api "org.reactivestreams:reactive-streams:${versions.reactiveStreams}" + + api "org.slf4j:slf4j-api:${versions.slf4j}" + testImplementation project(':test:fixtures:azure-fixture') } @@ -48,41 +98,213 @@ restResources { tasks.named("dependencyLicenses").configure { mapping from: /azure-.*/, to: 'azure' mapping from: /jackson-.*/, to: 'jackson' - mapping from: /jersey-.*/, to: 'jersey' + mapping from: /netty-.*/, to: 'netty' mapping from: /jaxb-.*/, to: 'jaxb' mapping from: /stax-.*/, to: 'stax' + mapping from: /reactive-streams.*/, to: 'reactive-streams' } tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( - // Optional and not enabled by Elasticsearch - 'org.slf4j.Logger', - 'org.slf4j.LoggerFactory' + // from reactory-netty metric collection + 'io.micrometer.core.instrument.Clock', + 'io.micrometer.core.instrument.Counter', + 'io.micrometer.core.instrument.Counter$Builder', + 'io.micrometer.core.instrument.DistributionSummary', + 'io.micrometer.core.instrument.DistributionSummary$Builder', + 'io.micrometer.core.instrument.Meter', + 'io.micrometer.core.instrument.MeterRegistry', + 
'io.micrometer.core.instrument.Metrics', + 'io.micrometer.core.instrument.Tag', + 'io.micrometer.core.instrument.Tags', + 'io.micrometer.core.instrument.Timer', + 'io.micrometer.core.instrument.Timer$Builder', + 'io.micrometer.core.instrument.Timer$Sample', + 'io.micrometer.core.instrument.binder.jvm.ExecutorServiceMetrics', + 'io.micrometer.core.instrument.composite.CompositeMeterRegistry', + 'io.micrometer.core.instrument.search.Search', + 'io.micrometer.core.instrument.Gauge', + 'io.micrometer.core.instrument.Gauge$Builder', + + // from reactor-core kotlin extensions (to be deprecated from the library at some point on 3.3.x release) + 'kotlin.TypeCastException', + 'kotlin.collections.ArraysKt', + 'kotlin.jvm.JvmClassMappingKt', + 'kotlin.jvm.functions.Function0', + 'kotlin.jvm.functions.Function1', + 'kotlin.jvm.internal.FunctionReference', + 'kotlin.jvm.internal.Intrinsics', + 'kotlin.jvm.internal.Reflection', + 'kotlin.jvm.internal.markers.KMappedMarker', + 'kotlin.reflect.KClass', + 'kotlin.reflect.KDeclarationContainer', + 'kotlin.sequences.Sequence', + + // from io.netty.handler.codec.protobuf.ProtobufDecoder (netty) + 'com.google.protobuf.ExtensionRegistry', + 'com.google.protobuf.MessageLite$Builder', + 'com.google.protobuf.MessageLite', + 'com.google.protobuf.Parser', + 'com.google.protobuf.ExtensionRegistryLite', + 'com.google.protobuf.MessageLiteOrBuilder', + 'com.google.protobuf.nano.CodedOutputByteBufferNano', + 'com.google.protobuf.nano.MessageNano', + + // from io.netty.logging.CommonsLoggerFactory (netty) + 'org.apache.commons.logging.Log', + 'org.apache.commons.logging.LogFactory', + + // from Log4j (deliberate, Netty will fallback to Log4j 2) + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + + // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) + 'org.bouncycastle.cert.X509v3CertificateBuilder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', + 
'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder', + 'org.bouncycastle.jce.provider.BouncyCastleProvider', + 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + 'org.bouncycastle.asn1.x500.X500Name', + + // from io.netty.handler.ssl.JettyNpnSslEngine (netty) + 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', + 'org.eclipse.jetty.npn.NextProtoNego$ServerProvider', + 'org.eclipse.jetty.npn.NextProtoNego', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteInput (netty) + 'org.jboss.marshalling.ByteInput', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty) + 'org.jboss.marshalling.ByteOutput', + + // from io.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty) + 'org.jboss.marshalling.Marshaller', + + // from io.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty) + 'org.jboss.marshalling.MarshallerFactory', + 'org.jboss.marshalling.MarshallingConfiguration', + 'org.jboss.marshalling.Unmarshaller', + + // from io.netty.handler.codec.spdy.SpdyHeaderBlockJZlibEncoder (netty-codec-http) + 'com.jcraft.jzlib.Deflater', + 'com.jcraft.jzlib.Inflater', + 'com.jcraft.jzlib.JZlib$WrapperType', + 'com.jcraft.jzlib.JZlib', + + // from io.netty.handler.codec.compression.LzfDecoder + // from io.netty.handler.codec.compression.LzfEncoder (netty-codec) + 'com.ning.compress.BufferRecycler', + 'com.ning.compress.lzf.ChunkDecoder', + 'com.ning.compress.lzf.ChunkEncoder', + 'com.ning.compress.lzf.LZFChunk', + 'com.ning.compress.lzf.LZFEncoder', + 'com.ning.compress.lzf.util.ChunkDecoderFactory', + 'com.ning.compress.lzf.util.ChunkEncoderFactory', + + // from io.netty.handler.codec.compression.LzmaFrameEncoder (netty-codec) + 'lzma.sdk.lzma.Encoder', + + // from io.netty.handler.codec.compression.Lz4FrameDecoder (netty-codec) + 'net.jpountz.lz4.LZ4Compressor', + 'net.jpountz.lz4.LZ4Factory', + 'net.jpountz.lz4.LZ4FastDecompressor', + + // from io.netty.handler.codec.compression.Lz4XXHash32 
(netty-codec) + 'net.jpountz.xxhash.XXHash32', + 'net.jpountz.xxhash.XXHashFactory', + + // from io.netty.handler.ssl.JettyAlpnSslEngine (netty-handler optional dependency) + 'org.eclipse.jetty.alpn.ALPN$ClientProvider', + 'org.eclipse.jetty.alpn.ALPN$ServerProvider', + 'org.eclipse.jetty.alpn.ALPN', + + // from io.netty.handler.ssl.ConscryptAlpnSslEngine (netty-handler optional dependency) + 'org.conscrypt.AllocatedBuffer', + 'org.conscrypt.BufferAllocator', + 'org.conscrypt.Conscrypt', + 'org.conscrypt.HandshakeListener', + + // from io.netty.handler.ssl.OpenSslEngine (netty) + 'io.netty.internal.tcnative.Buffer', + 'io.netty.internal.tcnative.Library', + 'io.netty.internal.tcnative.SSL', + 'io.netty.internal.tcnative.SSLContext', + 'io.netty.internal.tcnative.SSLPrivateKeyMethod', + 'io.netty.internal.tcnative.CertificateCallback', + 'io.netty.internal.tcnative.CertificateVerifier', + 'io.netty.internal.tcnative.SessionTicketKey', + 'io.netty.internal.tcnative.SniHostNameMatcher', + + // from io.netty.util.internal.Hidden (netty-common optional dependency) + 'reactor.blockhound.BlockHound$Builder', + 'reactor.blockhound.integration.BlockHoundIntegration', + + // it uses NIO + 'io.netty.channel.kqueue.KQueue', + 'io.netty.channel.kqueue.KQueueDatagramChannel', + 'io.netty.channel.kqueue.KQueueEventLoopGroup', + 'io.netty.channel.kqueue.KQueueServerSocketChannel', + 'io.netty.channel.kqueue.KQueueSocketChannel', + 'io.netty.channel.epoll.Epoll', + 'io.netty.channel.epoll.EpollDatagramChannel', + 'io.netty.channel.epoll.EpollEventLoopGroup', + 'io.netty.channel.epoll.EpollServerSocketChannel', + 'io.netty.channel.epoll.EpollSocketChannel', + + // from reactor.netty.http.server.HttpServer (reactor-netty) + 'io.netty.handler.codec.haproxy.HAProxyMessage', + 'io.netty.handler.codec.haproxy.HAProxyMessageDecoder', + + // from com.ctc.wstx.osgi.WstxBundleActivator (woodstox-core) + 'org.osgi.framework.BundleActivator', + 'org.osgi.framework.BundleContext', + + // from 
com.ctc.wstx.shaded.msv_core.driver.textui.Driver (woodstox-core) + 'com.sun.org.apache.xml.internal.resolver.Catalog', + 'com.sun.org.apache.xml.internal.resolver.tools.CatalogResolver', + + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder', ) ignoreViolations( - // uses internal java api: sun.misc.Unsafe - 'com.google.common.cache.Striped64', - 'com.google.common.cache.Striped64$1', - 'com.google.common.cache.Striped64$Cell', - 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', - 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', - 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', - 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', - 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', - 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', - 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', - 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1' + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + + 'io.netty.util.internal.PlatformDependent0', + 'io.netty.util.internal.PlatformDependent0$1', + 'io.netty.util.internal.PlatformDependent0$2', + 'io.netty.util.internal.PlatformDependent0$3', + 'io.netty.util.internal.PlatformDependent0$5', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 
'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + + 'javax.activation.MailcapCommandMap', + 'javax.activation.MimetypesFileTypeMap', + + 'reactor.core.publisher.MultiProducerRingBuffer', + 'reactor.core.publisher.RingBufferFields', + 'reactor.core.publisher.Traces$SharedSecretsCallSiteSupplierFactory$TracingException', + 'reactor.core.publisher.UnsafeSequence', + 'reactor.core.publisher.UnsafeSupport' ) } - boolean useFixture = false def azureAddress = { assert useFixture: 'closure should not be used without a fixture' int ephemeralPort = project(':test:fixtures:azure-fixture').postProcessFixture.ext."test.fixtures.azure-fixture.tcp.8091" assert ephemeralPort > 0 - return 'ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://127.0.0.1:' + ephemeralPort + '/' + return 'ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://127.0.0.1:' + ephemeralPort + '/azure_integration_test_account' } String azureAccount = System.getenv("azure_storage_account") diff --git a/plugins/repository-azure/licenses/azure-LICENSE.txt b/plugins/repository-azure/licenses/azure-LICENSE.txt index d645695673349..49d21669aeefc 100644 --- a/plugins/repository-azure/licenses/azure-LICENSE.txt +++ b/plugins/repository-azure/licenses/azure-LICENSE.txt @@ -1,202 +1,21 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-NOTICE.txt b/plugins/repository-azure/licenses/azure-NOTICE.txt index 8d1c8b69c3fce..76791aa477500 100644 --- a/plugins/repository-azure/licenses/azure-NOTICE.txt +++ b/plugins/repository-azure/licenses/azure-NOTICE.txt @@ -1 +1,159 @@ - +NOTICES AND INFORMATION +Do Not Translate or Localize + +This software incorporates material from third parties. Microsoft makes certain +open source code available at https://3rdpartysource.microsoft.com, or you may +send a check or money order for US $5.00, including the product name, the open +source component name, and version number, to: + +Source Code Compliance Team +Microsoft Corporation +One Microsoft Way +Redmond, WA 98052 +USA + +Notwithstanding any other terms, you may reverse engineer this software to the +extent required to debug changes to any libraries licensed under the GNU Lesser +General Public License. + +------------------------------------------------------------------------------ + +Azure SDK for Java uses third-party libraries or other resources that may be +distributed under licenses different than the Azure SDK for Java software. + +In the event that we accidentally failed to list a required notice, please +bring it to our attention. Post an issue or email us: + + azjavasdkhelp@microsoft.com + +The attached notices are provided for information only. 
+ +License notice for Hamcrest +------------------------------------------------------------------------------ + +The 3-Clause BSD License + +Copyright (c) 2000-2015 www.hamcrest.org +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this list of +conditions and the following disclaimer. Redistributions in binary form must reproduce +the above copyright notice, this list of conditions and the following disclaimer in +the documentation and/or other materials provided with the distribution. + +Neither the name of Hamcrest nor the names of its contributors may be used to endorse +or promote products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT +SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY +WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. + +License notice for Slf4j API +------------------------------------------------------------------------------ + + Copyright (c) 2004-2017 QOS.ch + All rights reserved. 
+ + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +License notice for Slf4j Simple +------------------------------------------------------------------------------ + + Copyright (c) 2004-2017 QOS.ch + All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +License notice for Guava (https://github.com/google/guava) +------------------------------------------------------------------------------ + +Copyright (C) 2010 The Guava Authors + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +in compliance with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License +is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +or implied. See the License for the specific language governing permissions and limitations under +the License. + +License notice for Netty +------------------------------------------------------------------------------ + +Copyright 2014 The Netty Project + +The Netty Project licenses this file to you under the Apache License, +version 2.0 (the "License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +License for the specific language governing permissions and limitations +under the License. 
+ +License notice for JUG Java Uuid Generator +------------------------------------------------------------------------------ + +JUG Java Uuid Generator + +Copyright (c) 2002- Tatu Saloranta, tatu.saloranta@iki.fi + +Licensed under the License specified in the file LICENSE which is +included with the source code. +You may not use this file except in compliance with the License. + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.10.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.10.0.jar.sha1 new file mode 100644 index 0000000000000..87047850d545e --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.10.0.jar.sha1 @@ -0,0 +1 @@ +d5e1258ba153b5e27c90b7c9cad262e6fc171d24 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.6.3.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.6.3.jar.sha1 new file mode 100644 index 0000000000000..59ae7ea8c65b4 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.6.3.jar.sha1 @@ -0,0 +1 @@ +1c72bdc36faad65f53dd160becc38dd93a7356e2 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-keyvault-core-1.0.0.jar.sha1 b/plugins/repository-azure/licenses/azure-keyvault-core-1.0.0.jar.sha1 deleted file mode 100644 index 8b44e99aee15d..0000000000000 --- a/plugins/repository-azure/licenses/azure-keyvault-core-1.0.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e89dd5e621e21b753096ec6a03f203c01482c612 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-8.6.2.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-8.6.2.jar.sha1 
deleted file mode 100644 index 0f8f24231fbdb..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-8.6.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d1b6de7264205e2441c667dfee5b002bbac61644 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.9.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.9.0.jar.sha1 new file mode 100644 index 0000000000000..f486ffca2bb46 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-common-12.9.0.jar.sha1 @@ -0,0 +1 @@ +42d0439a676e51bb1dea809c60e8a925bb07477c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/commons-lang3-3.4.jar.sha1 b/plugins/repository-azure/licenses/commons-lang3-3.4.jar.sha1 deleted file mode 100644 index fdd7040377b8f..0000000000000 --- a/plugins/repository-azure/licenses/commons-lang3-3.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5fe28b9518e58819180a43a850fbc0dd24b7c050 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/commons-lang3-NOTICE.txt b/plugins/repository-azure/licenses/commons-lang3-NOTICE.txt deleted file mode 100644 index 078282451b679..0000000000000 --- a/plugins/repository-azure/licenses/commons-lang3-NOTICE.txt +++ /dev/null @@ -1,8 +0,0 @@ -Apache Commons Lang -Copyright 2001-2014 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). 
- -This product includes software from the Spring Framework, -under the Apache License 2.0 (see: StringUtils.containsWhitespace()) diff --git a/plugins/repository-azure/licenses/guava-20.0.jar.sha1 b/plugins/repository-azure/licenses/guava-20.0.jar.sha1 deleted file mode 100644 index 7b6ae09060b29..0000000000000 --- a/plugins/repository-azure/licenses/guava-20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -89507701249388e1ed5ddcf8c41f4ce1be7831ef \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-LICENSE b/plugins/repository-azure/licenses/jackson-LICENSE new file mode 100644 index 0000000000000..f5f45d26a49d6 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/plugins/repository-azure/licenses/jackson-NOTICE b/plugins/repository-azure/licenses/jackson-NOTICE new file mode 100644 index 0000000000000..4c976b7b4cc58 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-NOTICE @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). 
+ +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.10.4.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.10.4.jar.sha1 new file mode 100644 index 0000000000000..0c548bb0e7711 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-annotations-2.10.4.jar.sha1 @@ -0,0 +1 @@ +6ae6028aff033f194c9710ad87c224ccaadeed6c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.10.4.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.10.4.jar.sha1 new file mode 100644 index 0000000000000..27d5a72cd27af --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-databind-2.10.4.jar.sha1 @@ -0,0 +1 @@ +76e9152e93d4cf052f93a64596f633ba5b1c8ed9 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.10.4.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.10.4.jar.sha1 new file mode 100644 index 0000000000000..b079e3798154d --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.10.4.jar.sha1 @@ -0,0 +1 @@ +ffd80322264922e7edb6b35139ec1f2f55824156 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.10.4.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.10.4.jar.sha1 new file mode 100644 index 0000000000000..33135389f24df --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.10.4.jar.sha1 @@ -0,0 +1 @@ +de00203e6fee3493c8978a0064a3dda2e8373545 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.10.4.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.10.4.jar.sha1 new file mode 100644 index 
0000000000000..ef26c940dbbe1 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.10.4.jar.sha1 @@ -0,0 +1 @@ +68364602aed552c0dcfc5743b393bad95c85b009 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jakarta.activation-api-1.2.1.jar.sha1 b/plugins/repository-azure/licenses/jakarta.activation-api-1.2.1.jar.sha1 new file mode 100644 index 0000000000000..de507235999c0 --- /dev/null +++ b/plugins/repository-azure/licenses/jakarta.activation-api-1.2.1.jar.sha1 @@ -0,0 +1 @@ +562a587face36ec7eff2db7f2fc95425c6602bc1 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jakarta.activation-api-LICENSE.txt b/plugins/repository-azure/licenses/jakarta.activation-api-LICENSE.txt new file mode 100644 index 0000000000000..02319e94f5f12 --- /dev/null +++ b/plugins/repository-azure/licenses/jakarta.activation-api-LICENSE.txt @@ -0,0 +1,11 @@ +Eclipse Distribution License - v 1.0 +Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jakarta.activation-api-NOTICE.txt b/plugins/repository-azure/licenses/jakarta.activation-api-NOTICE.txt new file mode 100644 index 0000000000000..d6492f81ccce4 --- /dev/null +++ b/plugins/repository-azure/licenses/jakarta.activation-api-NOTICE.txt @@ -0,0 +1,33 @@ +# Notices for Eclipse Project for JAF + +This content is produced and maintained by the Eclipse Project for JAF project. + +* Project home: https://projects.eclipse.org/projects/ee4j.jaf + +## Copyright + +All content is the property of the respective authors or their employers. For +more information regarding authorship of content, please consult the listed +source code repository logs. + +## Declared Project Licenses + +This program and the accompanying materials are made available under the terms +of the Eclipse Distribution License v. 1.0, +which is available at http://www.eclipse.org/org/documents/edl-v10.php. + +SPDX-License-Identifier: BSD-3-Clause + +## Source Code + +The project maintains the following source code repositories: + +* https://github.com/eclipse-ee4j/jaf + +## Third-party Content + +This project leverages the following third party content. 
+ +JUnit (4.12) + +* License: Eclipse Public License diff --git a/plugins/repository-azure/licenses/jakarta.xml.bind-api-2.3.2.jar.sha1 b/plugins/repository-azure/licenses/jakarta.xml.bind-api-2.3.2.jar.sha1 new file mode 100644 index 0000000000000..c66f654e9b56c --- /dev/null +++ b/plugins/repository-azure/licenses/jakarta.xml.bind-api-2.3.2.jar.sha1 @@ -0,0 +1 @@ +8d49996a4338670764d7ca4b85a1c4ccf7fe665d \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jakarta.xml.bind-api-LICENSE.txt b/plugins/repository-azure/licenses/jakarta.xml.bind-api-LICENSE.txt new file mode 100644 index 0000000000000..02319e94f5f12 --- /dev/null +++ b/plugins/repository-azure/licenses/jakarta.xml.bind-api-LICENSE.txt @@ -0,0 +1,11 @@ +Eclipse Distribution License - v 1.0 +Copyright (c) 2007, Eclipse Foundation, Inc. and its licensors. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +Neither the name of the Eclipse Foundation, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jakarta.xml.bind-api-NOTICE.txt b/plugins/repository-azure/licenses/jakarta.xml.bind-api-NOTICE.txt new file mode 100644 index 0000000000000..5aac171d23ff9 --- /dev/null +++ b/plugins/repository-azure/licenses/jakarta.xml.bind-api-NOTICE.txt @@ -0,0 +1,44 @@ +# Notices for Eclipse Project for JAXB + +This content is produced and maintained by the Eclipse Project for JAXB project. + +* Project home: https://projects.eclipse.org/projects/ee4j.jaxb + +## Trademarks + +Eclipse Project for JAXB is a trademark of the Eclipse Foundation. + +## Copyright + +All content is the property of the respective authors or their employers. For +more information regarding authorship of content, please consult the listed +source code repository logs. + +## Declared Project Licenses + +This program and the accompanying materials are made available under the terms +of the Eclipse Distribution License v. 1.0 which is available +at http://www.eclipse.org/org/documents/edl-v10.php. + +SPDX-License-Identifier: BSD-3-Clause + +## Source Code + +The project maintains the following source code repositories: + +* https://github.com/eclipse-ee4j/jaxb-api + +## Third-party Content + +This project leverages the following third party content. + +None + +## Cryptography + +Content may contain encryption software. 
The country in which you are currently +may have restrictions on the import, possession, and use, and/or re-export to +another country, of encryption software. BEFORE using any encryption software, +please check the country's laws, regulations and policies concerning the import, +possession, or use, and re-export of encryption software, to see if this is +permitted. diff --git a/plugins/repository-azure/licenses/commons-lang3-LICENSE.txt b/plugins/repository-azure/licenses/netty-LICENSE.txt similarity index 100% rename from plugins/repository-azure/licenses/commons-lang3-LICENSE.txt rename to plugins/repository-azure/licenses/netty-LICENSE.txt diff --git a/plugins/repository-azure/licenses/netty-NOTICE.txt b/plugins/repository-azure/licenses/netty-NOTICE.txt new file mode 100644 index 0000000000000..5bbf91a14de23 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-NOTICE.txt @@ -0,0 +1,116 @@ + + The Netty Project + ================= + +Please visit the Netty web site for more information: + + * http://netty.io/ + +Copyright 2011 The Netty Project + +The Netty Project licenses this file to you under the Apache License, +version 2.0 (the "License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +License for the specific language governing permissions and limitations +under the License. + +Also, please refer to each LICENSE..txt file, which is located in +the 'license' directory of the distribution file, for the license terms of the +components that this product depends on. 
+ +------------------------------------------------------------------------------- +This product contains the extensions to Java Collections Framework which has +been derived from the works by JSR-166 EG, Doug Lea, and Jason T. Greene: + + * LICENSE: + * license/LICENSE.jsr166y.txt (Public Domain) + * HOMEPAGE: + * http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/ + * http://viewvc.jboss.org/cgi-bin/viewvc.cgi/jbosscache/experimental/jsr166/ + +This product contains a modified version of Robert Harder's Public Domain +Base64 Encoder and Decoder, which can be obtained at: + + * LICENSE: + * license/LICENSE.base64.txt (Public Domain) + * HOMEPAGE: + * http://iharder.sourceforge.net/current/java/base64/ + +This product contains a modified version of 'JZlib', a re-implementation of +zlib in pure Java, which can be obtained at: + + * LICENSE: + * license/LICENSE.jzlib.txt (BSD Style License) + * HOMEPAGE: + * http://www.jcraft.com/jzlib/ + +This product contains a modified version of 'Webbit', a Java event based +WebSocket and HTTP server: + + * LICENSE: + * license/LICENSE.webbit.txt (BSD License) + * HOMEPAGE: + * https://github.com/joewalnes/webbit + +This product optionally depends on 'Protocol Buffers', Google's data +interchange format, which can be obtained at: + + * LICENSE: + * license/LICENSE.protobuf.txt (New BSD License) + * HOMEPAGE: + * http://code.google.com/p/protobuf/ + +This product optionally depends on 'Bouncy Castle Crypto APIs' to generate +a temporary self-signed X.509 certificate when the JVM does not provide the +equivalent functionality. 
It can be obtained at: + + * LICENSE: + * license/LICENSE.bouncycastle.txt (MIT License) + * HOMEPAGE: + * http://www.bouncycastle.org/ + +This product optionally depends on 'SLF4J', a simple logging facade for Java, +which can be obtained at: + + * LICENSE: + * license/LICENSE.slf4j.txt (MIT License) + * HOMEPAGE: + * http://www.slf4j.org/ + +This product optionally depends on 'Apache Commons Logging', a logging +framework, which can be obtained at: + + * LICENSE: + * license/LICENSE.commons-logging.txt (Apache License 2.0) + * HOMEPAGE: + * http://commons.apache.org/logging/ + +This product optionally depends on 'Apache Log4J', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.log4j.txt (Apache License 2.0) + * HOMEPAGE: + * http://logging.apache.org/log4j/ + +This product optionally depends on 'JBoss Logging', a logging framework, +which can be obtained at: + + * LICENSE: + * license/LICENSE.jboss-logging.txt (GNU LGPL 2.1) + * HOMEPAGE: + * http://anonsvn.jboss.org/repos/common/common-logging-spi/ + +This product optionally depends on 'Apache Felix', an open source OSGi +framework implementation, which can be obtained at: + + * LICENSE: + * license/LICENSE.felix.txt (Apache License 2.0) + * HOMEPAGE: + * http://felix.apache.org/ diff --git a/plugins/repository-azure/licenses/netty-buffer-4.1.49.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-buffer-4.1.49.Final.jar.sha1 new file mode 100644 index 0000000000000..14da1fbad92f1 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-buffer-4.1.49.Final.jar.sha1 @@ -0,0 +1 @@ +8e819a81bca88d1e88137336f64531a53db0a4ad \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-4.1.49.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-4.1.49.Final.jar.sha1 new file mode 100644 index 0000000000000..6353dc0b7ada3 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-4.1.49.Final.jar.sha1 @@ -0,0 +1 @@ 
+20218de83c906348283f548c255650fd06030424 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http-4.1.49.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http-4.1.49.Final.jar.sha1 new file mode 100644 index 0000000000000..07651dd7f7682 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http-4.1.49.Final.jar.sha1 @@ -0,0 +1 @@ +4f30dbc462b26c588dffc0eb7552caef1a0f549e \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.49.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.49.Final.jar.sha1 new file mode 100644 index 0000000000000..d67715bbbe877 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.49.Final.jar.sha1 @@ -0,0 +1 @@ +ca35293757f80cd2460c80791757db261615dbe7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.49.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.49.Final.jar.sha1 new file mode 100644 index 0000000000000..ef90665257b09 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.49.Final.jar.sha1 @@ -0,0 +1 @@ +df75527823f9fd13f6bd9d9098bd9eb786dcafb5 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-common-4.1.49.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-common-4.1.49.Final.jar.sha1 new file mode 100644 index 0000000000000..2c0aee66a9914 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-common-4.1.49.Final.jar.sha1 @@ -0,0 +1 @@ +927c8563a1662d869b145e70ce82ad89100f2c90 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-4.1.49.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-4.1.49.Final.jar.sha1 new file mode 100644 index 0000000000000..c6e2ae4fa045c --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-4.1.49.Final.jar.sha1 @@ -0,0 +1 @@ +c73443adb9d085d5dc2d5b7f3bdd91d5963976f7 \ 
No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.49.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.49.Final.jar.sha1 new file mode 100644 index 0000000000000..024a87fe382a6 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.49.Final.jar.sha1 @@ -0,0 +1 @@ +6a2064cc62c7d18719742e1e101199c04c66356c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-4.1.49.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-4.1.49.Final.jar.sha1 new file mode 100644 index 0000000000000..986895a8ecf31 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-4.1.49.Final.jar.sha1 @@ -0,0 +1 @@ +eb81e1f0eaa99e75983bf3d28cae2b103e0f3a34 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-4.1.49.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-4.1.49.Final.jar.sha1 new file mode 100644 index 0000000000000..175b8c84a8824 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-4.1.49.Final.jar.sha1 @@ -0,0 +1 @@ +415ea7f326635743aec952fe2349ca45959e94a7 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactive-streams-1.0.3.jar.sha1 b/plugins/repository-azure/licenses/reactive-streams-1.0.3.jar.sha1 new file mode 100644 index 0000000000000..77210f7c7b402 --- /dev/null +++ b/plugins/repository-azure/licenses/reactive-streams-1.0.3.jar.sha1 @@ -0,0 +1 @@ +d9fb7a7926ffa635b3dcaa5049fb2bfa25b3e7d0 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactive-streams-LICENSE.txt b/plugins/repository-azure/licenses/reactive-streams-LICENSE.txt new file mode 100644 index 0000000000000..1e141c13ddba2 --- /dev/null +++ b/plugins/repository-azure/licenses/reactive-streams-LICENSE.txt @@ -0,0 +1,7 @@ +MIT No Attribution + +Copyright 2014 Reactive Streams + +Permission is hereby granted, free of charge, to any person 
obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/plugins/repository-azure/licenses/guava-NOTICE.txt b/plugins/repository-azure/licenses/reactive-streams-NOTICE.txt similarity index 100% rename from plugins/repository-azure/licenses/guava-NOTICE.txt rename to plugins/repository-azure/licenses/reactive-streams-NOTICE.txt diff --git a/plugins/repository-azure/licenses/reactor-core-3.3.10.RELEASE.jar.sha1 b/plugins/repository-azure/licenses/reactor-core-3.3.10.RELEASE.jar.sha1 new file mode 100644 index 0000000000000..181cb897756ed --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-core-3.3.10.RELEASE.jar.sha1 @@ -0,0 +1 @@ +f5787f994a9a810c0986418232e06fcf4afc1216 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-core-LICENSE.txt b/plugins/repository-azure/licenses/reactor-core-LICENSE.txt new file mode 100644 index 0000000000000..d5dd862b1759b --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-core-LICENSE.txt @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/plugins/repository-azure/licenses/reactor-core-NOTICE.txt b/plugins/repository-azure/licenses/reactor-core-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-azure/licenses/reactor-netty-0.9.12.RELEASE.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-0.9.12.RELEASE.jar.sha1 new file mode 100644 index 0000000000000..92f7fafc27375 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-0.9.12.RELEASE.jar.sha1 @@ -0,0 +1 @@ +41022546d07f1499fb9d8617bba4a1a89d3549db \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-LICENSE.txt b/plugins/repository-azure/licenses/reactor-netty-LICENSE.txt new file mode 100644 index 0000000000000..d5dd862b1759b --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-LICENSE.txt @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/plugins/repository-azure/licenses/reactor-netty-NOTICE.txt b/plugins/repository-azure/licenses/reactor-netty-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 b/plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 new file mode 100644 index 0000000000000..a2f93ea55802b --- /dev/null +++ b/plugins/repository-azure/licenses/slf4j-api-1.6.2.jar.sha1 @@ -0,0 +1 @@ +8619e95939167fb37245b5670135e4feb0ec7d50 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/slf4j-api-LICENSE.txt b/plugins/repository-azure/licenses/slf4j-api-LICENSE.txt new file mode 100644 index 0000000000000..52055e61de46f --- /dev/null +++ b/plugins/repository-azure/licenses/slf4j-api-LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2004-2014 QOS.ch +All rights reserved. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/plugins/repository-azure/licenses/slf4j-api-NOTICE.txt b/plugins/repository-azure/licenses/slf4j-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-azure/licenses/stax2-api-4.2.jar.sha1 b/plugins/repository-azure/licenses/stax2-api-4.2.jar.sha1 new file mode 100644 index 0000000000000..58abf43f36bc7 --- /dev/null +++ b/plugins/repository-azure/licenses/stax2-api-4.2.jar.sha1 @@ -0,0 +1 @@ +13c2b30926bca0429c704c4b4ca0b5d0432b69cd \ No newline at end of file diff --git a/plugins/repository-azure/licenses/stax2-api-LICENSE.txt b/plugins/repository-azure/licenses/stax2-api-LICENSE.txt new file mode 100644 index 0000000000000..06e1dd27053f2 --- /dev/null +++ b/plugins/repository-azure/licenses/stax2-api-LICENSE.txt @@ -0,0 +1,12 @@ +BSD-2-Clause + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + = FasterXML.com + = 2010- diff --git a/plugins/repository-azure/licenses/stax2-api-NOTICE.txt b/plugins/repository-azure/licenses/stax2-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-azure/licenses/woodstox-core-6.0.2.jar.sha1 b/plugins/repository-azure/licenses/woodstox-core-6.0.2.jar.sha1 new file mode 100644 index 0000000000000..7d2fa5254ef55 --- /dev/null +++ b/plugins/repository-azure/licenses/woodstox-core-6.0.2.jar.sha1 @@ -0,0 +1 @@ +bbd163bbdb4d6340298b61a6789cc174fb589868 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/guava-LICENSE.txt b/plugins/repository-azure/licenses/woodstox-core-LICENSE similarity index 100% rename from plugins/repository-azure/licenses/guava-LICENSE.txt rename to plugins/repository-azure/licenses/woodstox-core-LICENSE diff --git a/plugins/repository-azure/licenses/woodstox-core-NOTICE b/plugins/repository-azure/licenses/woodstox-core-NOTICE new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java index 3e305c1cb1baf..340123a9108af 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java +++ 
b/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureBlobStoreRepositoryTests.java @@ -18,15 +18,16 @@ */ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.Constants; -import com.microsoft.azure.storage.RetryExponentialRetry; -import com.microsoft.azure.storage.RetryPolicyFactory; -import com.microsoft.azure.storage.blob.BlobRequestOptions; +import com.azure.storage.common.policy.RequestRetryOptions; +import com.azure.storage.common.policy.RetryPolicyType; import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import fixture.azure.AzureHttpHandler; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; @@ -35,7 +36,9 @@ import org.elasticsearch.repositories.blobstore.ESMockAPIBasedRepositoryIntegTestCase; import org.elasticsearch.rest.RestStatus; +import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; import java.nio.charset.StandardCharsets; import java.util.Base64; import java.util.Collection; @@ -44,9 +47,13 @@ import java.util.function.Predicate; import java.util.regex.Pattern; +import static org.hamcrest.Matchers.equalTo; + @SuppressForbidden(reason = "this test uses a HttpServer to emulate an Azure endpoint") public class AzureBlobStoreRepositoryTests extends ESMockAPIBasedRepositoryIntegTestCase { + private static final String DEFAULT_ACCOUNT_NAME = "account"; + @Override protected String repositoryType() { return AzureRepository.TYPE; @@ -68,22 +75,25 @@ protected Collection> nodePlugins() { @Override protected Map createHttpHandlers() { - return 
Collections.singletonMap("/container", new AzureHTTPStatsCollectorHandler(new AzureBlobStoreHttpHandler("container"))); + return Collections.singletonMap("/" + DEFAULT_ACCOUNT_NAME, + new AzureHTTPStatsCollectorHandler(new AzureBlobStoreHttpHandler(DEFAULT_ACCOUNT_NAME, "container"))); } @Override protected HttpHandler createErroneousHttpHandler(final HttpHandler delegate) { - return new AzureErroneousHttpHandler(delegate, randomIntBetween(2, 3)); + return new AzureErroneousHttpHandler(delegate, AzureStorageSettings.DEFAULT_MAX_RETRIES); } @Override protected Settings nodeSettings(int nodeOrdinal) { final String key = Base64.getEncoder().encodeToString(randomAlphaOfLength(10).getBytes(StandardCharsets.UTF_8)); final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString(AzureStorageSettings.ACCOUNT_SETTING.getConcreteSettingForNamespace("test").getKey(), "account"); + String accountName = DEFAULT_ACCOUNT_NAME; + secureSettings.setString(AzureStorageSettings.ACCOUNT_SETTING.getConcreteSettingForNamespace("test").getKey(), accountName); secureSettings.setString(AzureStorageSettings.KEY_SETTING.getConcreteSettingForNamespace("test").getKey(), key); - final String endpoint = "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=" + httpServerUrl(); + // see com.azure.storage.blob.BlobUrlParts.parseIpUrl + final String endpoint = "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=" + httpServerUrl() + "/" + accountName; return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(AzureStorageSettings.ENDPOINT_SUFFIX_SETTING.getConcreteSettingForNamespace("test").getKey(), endpoint) @@ -102,18 +112,23 @@ public TestAzureRepositoryPlugin(Settings settings) { } @Override - AzureStorageService createAzureStoreService(final Settings settings) { - return new AzureStorageService(settings) { + AzureStorageService createAzureStorageService(Settings settings, AzureClientProvider azureClientProvider) { + return new 
AzureStorageService(settings, azureClientProvider) { + @Override + RequestRetryOptions getRetryOptions(LocationMode locationMode, AzureStorageSettings azureStorageSettings) { + return new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, + azureStorageSettings.getMaxRetries() + 1, 5, + 1L, 15L, null); + } + @Override - RetryPolicyFactory createRetryPolicy(final AzureStorageSettings azureStorageSettings) { - return new RetryExponentialRetry(1, 100, 500, azureStorageSettings.getMaxRetries()); + long getUploadBlockSize() { + return ByteSizeUnit.MB.toBytes(1); } @Override - BlobRequestOptions getBlobRequestOptionsForWriteBlob() { - BlobRequestOptions options = new BlobRequestOptions(); - options.setSingleBlobPutThresholdInBytes(Math.toIntExact(ByteSizeUnit.MB.toBytes(1))); - return options; + long getSizeThresholdForMultiBlockUpload() { + return ByteSizeUnit.MB.toBytes(1); } }; } @@ -122,8 +137,8 @@ BlobRequestOptions getBlobRequestOptionsForWriteBlob() { @SuppressForbidden(reason = "this test uses a HttpHandler to emulate an Azure endpoint") private static class AzureBlobStoreHttpHandler extends AzureHttpHandler implements BlobStoreHttpHandler { - AzureBlobStoreHttpHandler(final String container) { - super(container); + AzureBlobStoreHttpHandler(final String account, final String container) { + super(account, container); } } @@ -152,8 +167,8 @@ protected void handleAsError(final HttpExchange exchange) throws IOException { @Override protected String requestUniqueId(final HttpExchange exchange) { - final String requestId = exchange.getRequestHeaders().getFirst(Constants.HeaderConstants.CLIENT_REQUEST_ID_HEADER); - final String range = exchange.getRequestHeaders().getFirst(Constants.HeaderConstants.STORAGE_RANGE_HEADER); + final String requestId = exchange.getRequestHeaders().getFirst("X-ms-client-request-id"); + final String range = exchange.getRequestHeaders().getFirst("Content-Range"); return exchange.getRequestMethod() + " " + requestId + (range != null ? 
" " + range : ""); @@ -165,8 +180,8 @@ protected String requestUniqueId(final HttpExchange exchange) { */ @SuppressForbidden(reason = "this test uses a HttpServer to emulate an Azure endpoint") private static class AzureHTTPStatsCollectorHandler extends HttpStatsCollectorHandler { - - private static final Predicate listPattern = Pattern.compile("GET /[a-zA-Z0-9]+\\??.+").asMatchPredicate(); + private static final Predicate LIST_PATTERN = Pattern.compile("GET /[a-zA-Z0-9]+/[a-zA-Z0-9]+\\?.+").asMatchPredicate(); + private static final Predicate GET_BLOB_PATTERN = Pattern.compile("GET /[a-zA-Z0-9]+/[a-zA-Z0-9]+/.+").asMatchPredicate(); private AzureHTTPStatsCollectorHandler(HttpHandler delegate) { super(delegate); @@ -174,11 +189,11 @@ private AzureHTTPStatsCollectorHandler(HttpHandler delegate) { @Override protected void maybeTrack(String request, Headers headers) { - if (Regex.simpleMatch("GET /*/*", request)) { + if (GET_BLOB_PATTERN.test(request)) { trackRequest("GetBlob"); - } else if (Regex.simpleMatch("HEAD /*/*", request)) { + } else if (Regex.simpleMatch("HEAD /*/*/*", request)) { trackRequest("GetBlobProperties"); - } else if (listPattern.test(request)) { + } else if (LIST_PATTERN.test(request)) { trackRequest("ListBlobs"); } else if (isPutBlock(request)) { trackRequest("PutBlock"); @@ -199,4 +214,21 @@ private boolean isPutBlockList(String request) { return Regex.simpleMatch("PUT /*/*?*comp=blocklist*", request); } } + + public void testLargeBlobCountDeletion() throws Exception { + int numberOfBlobs = randomIntBetween(257, 2000); + try (BlobStore store = newBlobStore()) { + final BlobContainer container = store.blobContainer(new BlobPath()); + for (int i = 0; i < numberOfBlobs; i++) { + byte[] bytes = randomBytes(randomInt(100)); + try (InputStream inputStream = new ByteArrayInputStream(bytes)) { + String blobName = randomAlphaOfLength(10); + container.writeBlob(blobName, inputStream, bytes.length, false); + } + } + + container.delete(); + 
assertThat(container.listBlobs().size(), equalTo(0)); + } + } } diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index 985fae740702e..77f14b1df061f 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ b/plugins/repository-azure/src/internalClusterTest/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -19,15 +19,13 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.OperationContext; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.CloudBlobClient; -import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.models.BlobStorageException; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.Settings; @@ -37,7 +35,6 @@ import java.net.HttpURLConnection; import java.util.Collection; -import java.util.function.Supplier; import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.equalTo; @@ -103,16 +100,19 @@ private void ensureSasTokenPermissions() { final PlainActionFuture future = PlainActionFuture.newFuture(); repository.threadPool().generic().execute(ActionRunnable.wrap(future, l -> { final AzureBlobStore 
blobStore = (AzureBlobStore) repository.blobStore(); - final String account = "default"; - final Tuple> client = blobStore.getService().client(account); - final CloudBlobContainer blobContainer = client.v1().getContainerReference(blobStore.toString()); + final AzureBlobServiceClient azureBlobServiceClient = + blobStore.getService().client("default", LocationMode.PRIMARY_ONLY); + final BlobServiceClient client = azureBlobServiceClient.getSyncClient(); try { - SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, client.v2().get())); + SocketAccess.doPrivilegedException(() -> { + final BlobContainerClient blobContainer = client.getBlobContainerClient(blobStore.toString()); + return blobContainer.exists(); + }); future.onFailure(new RuntimeException( "The SAS token used in this test allowed for checking container existence. This test only supports tokens " + "that grant only the documented permission requirements for the Azure repository plugin.")); - } catch (StorageException e) { - if (e.getHttpStatusCode() == HttpURLConnection.HTTP_FORBIDDEN) { + } catch (BlobStorageException e) { + if (e.getStatusCode() == HttpURLConnection.HTTP_FORBIDDEN) { future.onResponse(null); } else { future.onFailure(e); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index 2fad555c968c5..734ad8bc8c205 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -19,54 +19,40 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.Constants; -import com.microsoft.azure.storage.LocationMode; -import com.microsoft.azure.storage.StorageException; +import com.azure.storage.blob.models.BlobStorageException; import 
org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.action.ActionRunnable; -import org.elasticsearch.action.support.GroupedActionListener; -import org.elasticsearch.action.support.PlainActionFuture; +import org.apache.logging.log4j.core.util.Throwables; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetadata; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; -import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; import java.io.InputStream; -import java.net.HttpURLConnection; -import java.net.URISyntaxException; import java.nio.file.NoSuchFileException; import java.util.List; import java.util.Map; -import java.util.concurrent.ExecutorService; +import java.util.stream.Collectors; public class AzureBlobContainer extends AbstractBlobContainer { private final Logger logger = LogManager.getLogger(AzureBlobContainer.class); private final AzureBlobStore blobStore; - private final ThreadPool threadPool; private final String keyPath; - AzureBlobContainer(BlobPath path, AzureBlobStore blobStore, ThreadPool threadPool) { + AzureBlobContainer(BlobPath path, AzureBlobStore blobStore) { super(path); this.blobStore = blobStore; this.keyPath = path.buildAsString(); - this.threadPool = threadPool; } @Override - public boolean blobExists(String blobName) { + public boolean blobExists(String blobName) throws IOException { logger.trace("blobExists({})", blobName); - try { - return blobStore.blobExists(buildKey(blobName)); - } catch (URISyntaxException | StorageException e) { - logger.warn("can not access [{}] in container {{}}: {}", blobName, blobStore, e.getMessage()); - } - return false; + return blobStore.blobExists(buildKey(blobName)); } private InputStream openInputStream(String 
blobName, long position, @Nullable Long length) throws IOException { @@ -82,13 +68,14 @@ private InputStream openInputStream(String blobName, long position, @Nullable Lo } try { return blobStore.getInputStream(buildKey(blobName), position, length); - } catch (StorageException e) { - if (e.getHttpStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) { - throw new NoSuchFileException(e.getMessage()); + } catch (Exception e) { + Throwable rootCause = Throwables.getRootCause(e); + if (rootCause instanceof BlobStorageException) { + if (((BlobStorageException) rootCause).getStatusCode() == 404) { + throw new NoSuchFileException("Blob [" + blobName + "] not found"); + } } - throw new IOException(e); - } catch (URISyntaxException e) { - throw new IOException(e); + throw new IOException("Unable to get input stream for blob [" + blobName + "]", e); } } @@ -104,17 +91,13 @@ public InputStream readBlob(String blobName, long position, long length) throws @Override public long readBlobPreferredLength() { - return Constants.DEFAULT_MINIMUM_READ_SIZE_IN_BYTES; + return blobStore.getReadChunkSize(); } @Override public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { logger.trace("writeBlob({}, stream, {})", buildKey(blobName), blobSize); - try { - blobStore.writeBlob(buildKey(blobName), inputStream, blobSize, failIfAlreadyExists); - } catch (URISyntaxException|StorageException e) { - throw new IOException("Can not write blob " + blobName, e); - } + blobStore.writeBlob(buildKey(blobName), inputStream, blobSize, failIfAlreadyExists); } @Override @@ -124,56 +107,22 @@ public void writeBlobAtomic(String blobName, InputStream inputStream, long blobS @Override public DeleteResult delete() throws IOException { - try { - return blobStore.deleteBlobDirectory(keyPath, threadPool.executor(AzureRepositoryPlugin.REPOSITORY_THREAD_POOL_NAME)); - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } 
+ return blobStore.deleteBlobDirectory(keyPath); } @Override public void deleteBlobsIgnoringIfNotExists(List blobNames) throws IOException { - final PlainActionFuture result = PlainActionFuture.newFuture(); - if (blobNames.isEmpty()) { - result.onResponse(null); - } else { - final GroupedActionListener listener = - new GroupedActionListener<>(result.map(v -> null), blobNames.size()); - final ExecutorService executor = threadPool.executor(AzureRepositoryPlugin.REPOSITORY_THREAD_POOL_NAME); - // Executing deletes in parallel since Azure SDK 8 is using blocking IO while Azure does not provide a bulk delete API endpoint - // TODO: Upgrade to newer non-blocking Azure SDK 11 and execute delete requests in parallel that way. - for (String blobName : blobNames) { - executor.execute(ActionRunnable.run(listener, () -> { - logger.trace("deleteBlob({})", blobName); - try { - blobStore.deleteBlob(buildKey(blobName)); - } catch (StorageException e) { - if (e.getHttpStatusCode() != HttpURLConnection.HTTP_NOT_FOUND) { - throw new IOException(e); - } - } catch (URISyntaxException e) { - throw new IOException(e); - } - })); - } - } - try { - result.actionGet(); - } catch (Exception e) { - throw new IOException("Exception during bulk delete", e); - } + List blobsWithFullPath = blobNames.stream() + .map(this::buildKey) + .collect(Collectors.toList()); + + blobStore.deleteBlobList(blobsWithFullPath); } @Override public Map listBlobsByPrefix(@Nullable String prefix) throws IOException { logger.trace("listBlobsByPrefix({})", prefix); - - try { - return blobStore.listBlobsByPrefix(keyPath, prefix); - } catch (URISyntaxException | StorageException e) { - logger.warn("can not access [{}] in container {{}}: {}", prefix, blobStore, e.getMessage()); - throw new IOException(e); - } + return blobStore.listBlobsByPrefix(keyPath, prefix); } @Override @@ -185,11 +134,7 @@ public Map listBlobs() throws IOException { @Override public Map children() throws IOException { final BlobPath path = path(); - 
try { - return blobStore.children(path); - } catch (URISyntaxException | StorageException e) { - throw new IOException("Failed to list children in path [" + path.buildAsString() + "].", e); - } + return blobStore.children(path); } protected String buildKey(String blobName) { diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobServiceClient.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobServiceClient.java new file mode 100644 index 0000000000000..cb200bd778918 --- /dev/null +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobServiceClient.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories.azure; + +import com.azure.storage.blob.BlobServiceAsyncClient; +import com.azure.storage.blob.BlobServiceClient; +import io.netty.buffer.ByteBufAllocator; + +class AzureBlobServiceClient { + private final BlobServiceClient blobServiceClient; + private final BlobServiceAsyncClient blobAsyncClient; + private final int maxRetries; + private final ByteBufAllocator allocator; + + AzureBlobServiceClient(BlobServiceClient blobServiceClient, + BlobServiceAsyncClient blobAsyncClient, + int maxRetries, + ByteBufAllocator allocator) { + this.blobServiceClient = blobServiceClient; + this.blobAsyncClient = blobAsyncClient; + this.maxRetries = maxRetries; + this.allocator = allocator; + } + + BlobServiceClient getSyncClient() { + return blobServiceClient; + } + + BlobServiceAsyncClient getAsyncClient() { + return blobAsyncClient; + } + + public ByteBufAllocator getAllocator() { + return allocator; + } + + int getMaxRetries() { + return maxRetries; + } +} diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 01a64a229ff55..1f4c79f278be7 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -19,26 +19,29 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.AccessCondition; -import com.microsoft.azure.storage.LocationMode; -import com.microsoft.azure.storage.OperationContext; -import com.microsoft.azure.storage.RequestCompletedEvent; -import com.microsoft.azure.storage.StorageErrorCodeStrings; -import com.microsoft.azure.storage.StorageEvent; -import com.microsoft.azure.storage.StorageException; -import com.microsoft.azure.storage.blob.BlobListingDetails; -import 
com.microsoft.azure.storage.blob.BlobProperties; -import com.microsoft.azure.storage.blob.CloudBlob; -import com.microsoft.azure.storage.blob.CloudBlobClient; -import com.microsoft.azure.storage.blob.CloudBlobContainer; -import com.microsoft.azure.storage.blob.CloudBlobDirectory; -import com.microsoft.azure.storage.blob.CloudBlockBlob; -import com.microsoft.azure.storage.blob.DeleteSnapshotsOption; -import com.microsoft.azure.storage.blob.ListBlobItem; +import com.azure.core.http.rest.ResponseBase; +import com.azure.storage.blob.BlobAsyncClient; +import com.azure.storage.blob.BlobClient; +import com.azure.storage.blob.BlobContainerAsyncClient; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobServiceAsyncClient; +import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.models.BlobErrorCode; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobItemProperties; +import com.azure.storage.blob.models.BlobListDetails; +import com.azure.storage.blob.models.BlobRange; +import com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.DownloadRetryOptions; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.blob.models.ParallelTransferOptions; +import com.azure.storage.blob.options.BlobParallelUploadOptions; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.util.ReferenceCountUtil; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.blobstore.BlobContainer; @@ -47,92 +50,131 @@ import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.DeleteResult; import 
org.elasticsearch.common.blobstore.support.PlainBlobMetadata; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.repositories.azure.AzureRepository.Repository; -import org.elasticsearch.threadpool.ThreadPool; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; -import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; import java.net.URI; import java.net.URISyntaxException; +import java.net.URL; +import java.nio.ByteBuffer; import java.nio.file.FileAlreadyExistsException; +import java.util.ArrayDeque; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; -import java.util.EnumSet; import java.util.HashMap; -import java.util.HashSet; +import java.util.List; import java.util.Map; -import java.util.concurrent.Executor; +import java.util.Objects; +import java.util.Queue; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; -import java.util.function.Consumer; -import java.util.function.Function; -import java.util.function.Supplier; -import java.util.stream.Collectors; - -import static java.util.Collections.emptyMap; +import java.util.function.BiConsumer; +import java.util.function.BiPredicate; public class AzureBlobStore implements BlobStore { - private static final Logger logger = LogManager.getLogger(AzureBlobStore.class); + private static final long DEFAULT_READ_CHUNK_SIZE = new ByteSizeValue(32, ByteSizeUnit.MB).getBytes(); private final AzureStorageService service; - private final ThreadPool threadPool; private final String clientName; private final String container; private final LocationMode locationMode; private final Stats stats = new Stats(); + private final BiConsumer statsConsumer; - private final 
Consumer getMetricsCollector; - private final Consumer listMetricsCollector; - private final Consumer uploadMetricsCollector; - - public AzureBlobStore(RepositoryMetadata metadata, AzureStorageService service, ThreadPool threadPool) { + public AzureBlobStore(RepositoryMetadata metadata, AzureStorageService service) { this.container = Repository.CONTAINER_SETTING.get(metadata.settings()); this.clientName = Repository.CLIENT_NAME.get(metadata.settings()); this.service = service; - this.threadPool = threadPool; // locationMode is set per repository, not per client this.locationMode = Repository.LOCATION_MODE_SETTING.get(metadata.settings()); - final Map prevSettings = this.service.refreshAndClearCache(emptyMap()); - final Map newSettings = AzureStorageSettings.overrideLocationMode(prevSettings, this.locationMode); - this.service.refreshAndClearCache(newSettings); - this.getMetricsCollector = (httpURLConnection) -> { - if (httpURLConnection.getRequestMethod().equals("HEAD")) { - stats.headOperations.incrementAndGet(); - return; - } - assert httpURLConnection.getRequestMethod().equals("GET"); - stats.getOperations.incrementAndGet(); - }; - this.listMetricsCollector = (httpURLConnection) -> { - assert httpURLConnection.getRequestMethod().equals("GET"); - stats.listOperations.incrementAndGet(); - }; - this.uploadMetricsCollector = (httpURLConnection -> { - assert httpURLConnection.getRequestMethod().equals("PUT"); - String queryParams = httpURLConnection.getURL().getQuery() == null ? 
"" : httpURLConnection.getURL().getQuery(); - - // https://docs.microsoft.com/en-us/rest/api/storageservices/put-block - // https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-list - if (queryParams.contains("comp=block") && queryParams.contains("blockid=")) { - stats.putBlockOperations.incrementAndGet(); - } else if (queryParams.contains("comp=blocklist")) { - stats.putBlockListOperations.incrementAndGet(); - } else { + List requestStatsCollectors = List.of( + RequestStatsCollector.create( + (httpMethod, url) -> httpMethod.equals("HEAD"), + stats.headOperations::incrementAndGet + ), + RequestStatsCollector.create( + (httpMethod, url) -> httpMethod.equals("GET") && isListRequest(httpMethod, url) == false, + stats.getOperations::incrementAndGet + ), + RequestStatsCollector.create( + this::isListRequest, + stats.listOperations::incrementAndGet + ), + RequestStatsCollector.create( + this::isPutBlockRequest, + stats.putBlockOperations::incrementAndGet + ), + RequestStatsCollector.create( + this::isPutBlockListRequest, + stats.putBlockListOperations::incrementAndGet + ), + RequestStatsCollector.create( // https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob#uri-parameters // The only URI parameter allowed for put-blob operation is "timeout", but if a sas token is used, // it's possible that the URI parameters contain additional parameters unrelated to the upload type. - stats.putOperations.incrementAndGet(); + (httpMethod, url) -> httpMethod.equals("PUT") && + isPutBlockRequest(httpMethod, url) == false && + isPutBlockListRequest(httpMethod, url) == false, + stats.putOperations::incrementAndGet + ) + ); + + this.statsConsumer = (httpMethod, url) -> { + try { + URI uri = url.toURI(); + String path = uri.getPath() == null ? 
"" : uri.getPath(); + // Batch delete requests + if (path.contains(container) == false) { + return; + } + assert path.contains(container) : uri.toString(); + } catch (URISyntaxException ignored) { + return; } - }); + + for (RequestStatsCollector requestStatsCollector : requestStatsCollectors) { + if (requestStatsCollector.shouldConsumeRequestInfo(httpMethod, url)) { + requestStatsCollector.consumeHttpRequestInfo(); + return; + } + } + }; + } + + private boolean isListRequest(String httpMethod, URL url) { + return httpMethod.equals("GET") && + url.getQuery() != null && + url.getQuery().contains("comp=list") && + url.getQuery().contains("delimiter="); + } + + // https://docs.microsoft.com/en-us/rest/api/storageservices/put-block + private boolean isPutBlockRequest(String httpMethod, URL url) { + String queryParams = url.getQuery() == null ? "" : url.getQuery(); + return httpMethod.equals("PUT") && + queryParams.contains("comp=block") && + queryParams.contains("blockid="); + } + + // https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-list + private boolean isPutBlockListRequest(String httpMethod, URL url) { + String queryParams = url.getQuery() == null ? "" : url.getQuery(); + return httpMethod.equals("PUT") && + queryParams.contains("comp=blocklist"); + } + + public long getReadChunkSize() { + return DEFAULT_READ_CHUNK_SIZE; } @Override @@ -153,213 +195,233 @@ public LocationMode getLocationMode() { @Override public BlobContainer blobContainer(BlobPath path) { - return new AzureBlobContainer(path, this, threadPool); + return new AzureBlobContainer(path, this); } @Override public void close() { } - public boolean blobExists(String blob) throws URISyntaxException, StorageException { - // Container name must be lower case. 
- final Tuple> client = client(); - final OperationContext context = hookMetricCollector(client.v2().get(), getMetricsCollector); - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - return SocketAccess.doPrivilegedException(() -> { - final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob); - return azureBlob.exists(null, null, context); - }); - } + public boolean blobExists(String blob) throws IOException { + final BlobServiceClient client = client(); - public void deleteBlob(String blob) throws URISyntaxException, StorageException { - final Tuple> client = client(); - // Container name must be lower case. - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - logger.trace(() -> new ParameterizedMessage("delete blob for container [{}], blob [{}]", container, blob)); - SocketAccess.doPrivilegedVoidException(() -> { - final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blob); - logger.trace(() -> new ParameterizedMessage("container [{}]: blob [{}] found. 
removing.", container, blob)); - azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get()); - }); + try { + Boolean blobExists = SocketAccess.doPrivilegedException(() -> { + final BlobClient azureBlob = client.getBlobContainerClient(container).getBlobClient(blob); + return azureBlob.exists(); + }); + return Boolean.TRUE.equals(blobExists); + } catch (Exception e) { + logger.trace("can not access [{}] in container {{}}: {}", blob, container, e.getMessage()); + throw new IOException("Unable to check if blob " + blob + " exists", e); + } } - public DeleteResult deleteBlobDirectory(String path, Executor executor) - throws URISyntaxException, StorageException, IOException { - final Tuple> client = client(); - final OperationContext context = hookMetricCollector(client.v2().get(), listMetricsCollector); - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - final Collection exceptions = Collections.synchronizedList(new ArrayList<>()); - final AtomicLong outstanding = new AtomicLong(1L); - final PlainActionFuture result = PlainActionFuture.newFuture(); - final AtomicLong blobsDeleted = new AtomicLong(); - final AtomicLong bytesDeleted = new AtomicLong(); - SocketAccess.doPrivilegedVoidException(() -> { - for (final ListBlobItem blobItem : blobContainer.listBlobs(path, true, - EnumSet.noneOf(BlobListingDetails.class), null, context)) { - // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/ - // this requires 1 + container.length() + 1, with each 1 corresponding to one of the / - final String blobPath = blobItem.getUri().getPath().substring(1 + container.length() + 1); - outstanding.incrementAndGet(); - executor.execute(new AbstractRunnable() { - @Override - protected void doRun() throws Exception { - final long len; - if (blobItem instanceof CloudBlob) { - len = ((CloudBlob) blobItem).getProperties().getLength(); + public DeleteResult deleteBlobDirectory(String path) throws 
IOException { + final AtomicInteger blobsDeleted = new AtomicInteger(0); + final AtomicLong bytesDeleted = new AtomicLong(0); + + try { + final BlobServiceClient client = client(); + SocketAccess.doPrivilegedVoidException(() -> { + final BlobContainerClient blobContainerClient = client.getBlobContainerClient(container); + final BlobContainerAsyncClient blobContainerAsyncClient = asyncClient().getBlobContainerAsyncClient(container); + final Queue directories = new ArrayDeque<>(); + directories.offer(path); + String directoryName; + List> deleteTasks = new ArrayList<>(); + while ((directoryName = directories.poll()) != null) { + final BlobListDetails blobListDetails = new BlobListDetails() + .setRetrieveMetadata(true); + + final ListBlobsOptions options = new ListBlobsOptions() + .setPrefix(directoryName) + .setDetails(blobListDetails); + + for (BlobItem blobItem : blobContainerClient.listBlobsByHierarchy("/", options, null)) { + if (blobItem.isPrefix() != null && blobItem.isPrefix()) { + directories.offer(blobItem.getName()); } else { - len = -1L; + BlobAsyncClient blobAsyncClient = blobContainerAsyncClient.getBlobAsyncClient(blobItem.getName()); + deleteTasks.add(blobAsyncClient.delete()); + bytesDeleted.addAndGet(blobItem.getProperties().getContentLength()); + blobsDeleted.incrementAndGet(); } - deleteBlob(blobPath); - blobsDeleted.incrementAndGet(); - if (len >= 0) { - bytesDeleted.addAndGet(len); - } - } - - @Override - public void onFailure(Exception e) { - exceptions.add(e); } + } - @Override - public void onAfter() { - if (outstanding.decrementAndGet() == 0) { - result.onResponse(null); - } - } - }); - } - }); - if (outstanding.decrementAndGet() == 0) { - result.onResponse(null); - } - result.actionGet(); - if (exceptions.isEmpty() == false) { - final IOException ex = new IOException("Deleting directory [" + path + "] failed"); - exceptions.forEach(ex::addSuppressed); - throw ex; + executeDeleteTasks(deleteTasks); + }); + } catch (Exception e) { + throw new 
IOException("Deleting directory [" + path + "] failed", e); } + return new DeleteResult(blobsDeleted.get(), bytesDeleted.get()); } - public InputStream getInputStream(String blob, long position, @Nullable Long length) throws URISyntaxException, StorageException { - final Tuple> client = client(); - final OperationContext context = hookMetricCollector(client.v2().get(), getMetricsCollector); - final CloudBlockBlob blockBlobReference = client.v1().getContainerReference(container).getBlockBlobReference(blob); - logger.trace(() -> new ParameterizedMessage("reading container [{}], blob [{}]", container, blob)); - final long limit; - if (length == null) { - // Loading the blob attributes so we can get its length - SocketAccess.doPrivilegedVoidException(() -> blockBlobReference.downloadAttributes(null, null, context)); - limit = blockBlobReference.getProperties().getLength() - position; - } - else { - limit = length; + void deleteBlobList(List blobs) throws IOException { + if (blobs.isEmpty()) { + return; } - final BlobInputStream blobInputStream = new BlobInputStream(limit, blockBlobReference, position, context); - if (length != null) { - // pre-filling the buffer in case of ranged reads so this method throws a 404 storage exception right away in case the blob - // does not exist - blobInputStream.fill(); + + try { + BlobServiceAsyncClient asyncClient = asyncClient(); + SocketAccess.doPrivilegedVoidException(() -> { + List> deleteTasks = new ArrayList<>(blobs.size()); + final BlobContainerAsyncClient blobContainerClient = asyncClient.getBlobContainerAsyncClient(container); + for (String blob : blobs) { + deleteTasks.add(blobContainerClient.getBlobAsyncClient(blob).delete()); + } + + executeDeleteTasks(deleteTasks); + }); + } catch (BlobStorageException e) { + if (e.getStatusCode() != 404) { + throw new IOException("Unable to delete blobs " + blobs, e); + } + } catch (Exception e) { + throw new IOException("Unable to delete blobs " + blobs, e); } - return blobInputStream; 
} - public Map listBlobsByPrefix(String keyPath, String prefix) - throws URISyntaxException, StorageException { - // NOTE: this should be here: if (prefix == null) prefix = ""; - // however, this is really inefficient since deleteBlobsByPrefix enumerates everything and - // then does a prefix match on the result; it should just call listBlobsByPrefix with the prefix! - final var blobsBuilder = new HashMap(); - final EnumSet enumBlobListingDetails = EnumSet.of(BlobListingDetails.METADATA); - final Tuple> client = client(); - final OperationContext context = hookMetricCollector(client.v2().get(), listMetricsCollector); - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - logger.trace(() -> new ParameterizedMessage("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix)); - SocketAccess.doPrivilegedVoidException(() -> { - for (final ListBlobItem blobItem : blobContainer.listBlobs(keyPath + (prefix == null ? "" : prefix), false, - enumBlobListingDetails, null, context)) { - final URI uri = blobItem.getUri(); - logger.trace(() -> new ParameterizedMessage("blob url [{}]", uri)); - // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/ - // this requires 1 + container.length() + 1, with each 1 corresponding to one of the / - final String blobPath = uri.getPath().substring(1 + container.length() + 1); - if (blobItem instanceof CloudBlob) { - final BlobProperties properties = ((CloudBlob) blobItem).getProperties(); - final String name = blobPath.substring(keyPath.length()); - logger.trace(() -> new ParameterizedMessage("blob url [{}], name [{}], size [{}]", uri, name, properties.getLength())); - blobsBuilder.put(name, new PlainBlobMetadata(name, properties.getLength())); - } + private void executeDeleteTasks(List> deleteTasks) { + Flux.merge(deleteTasks) + .collectList() + .block(); + } + + public InputStream getInputStream(String blob, long position, final @Nullable Long 
length) throws IOException { + logger.trace(() -> new ParameterizedMessage("reading container [{}], blob [{}]", container, blob)); + final AzureBlobServiceClient azureBlobServiceClient = getAzureBlobServiceClientClient(); + final BlobServiceClient syncClient = azureBlobServiceClient.getSyncClient(); + final BlobServiceAsyncClient asyncClient = azureBlobServiceClient.getAsyncClient(); + + return SocketAccess.doPrivilegedException(() ->{ + final BlobContainerClient blobContainerClient = syncClient.getBlobContainerClient(container); + final BlobClient blobClient = blobContainerClient.getBlobClient(blob); + final long totalSize; + if (length == null) { + totalSize = blobClient.getProperties().getBlobSize(); + } else { + totalSize = position + length; } + BlobAsyncClient blobAsyncClient = asyncClient.getBlobContainerAsyncClient(container).getBlobAsyncClient(blob); + int maxReadRetries = service.getMaxReadRetries(clientName); + return new AzureInputStream(blobAsyncClient, position, length == null ? totalSize : length , totalSize, maxReadRetries, + azureBlobServiceClient.getAllocator()); }); + } + + public Map listBlobsByPrefix(String keyPath, String prefix) throws IOException { + final var blobsBuilder = new HashMap(); + logger.trace(() -> + new ParameterizedMessage("listing container [{}], keyPath [{}], prefix [{}]", container, keyPath, prefix)); + try { + final BlobServiceClient client = client(); + SocketAccess.doPrivilegedVoidException(() -> { + final BlobContainerClient containerClient = client.getBlobContainerClient(container); + final BlobListDetails details = new BlobListDetails().setRetrieveMetadata(true); + final ListBlobsOptions listBlobsOptions = new ListBlobsOptions() + .setPrefix(keyPath + (prefix == null ? 
"" : prefix)) + .setDetails(details); + + for (final BlobItem blobItem : containerClient.listBlobsByHierarchy("/", listBlobsOptions, null)) { + BlobItemProperties properties = blobItem.getProperties(); + Boolean isPrefix = blobItem.isPrefix(); + if (isPrefix != null && isPrefix) { + continue; + } + String blobName = blobItem.getName().substring(keyPath.length()); + + blobsBuilder.put(blobName, + new PlainBlobMetadata(blobName, properties.getContentLength())); + } + }); + } catch (Exception e) { + throw new IOException("Unable to list blobs by prefix [" + prefix + "] for path " + keyPath, e); + } return Map.copyOf(blobsBuilder); } - public Map children(BlobPath path) throws URISyntaxException, StorageException { - final var blobsBuilder = new HashSet(); - final Tuple> client = client(); - final OperationContext context = hookMetricCollector(client.v2().get(), listMetricsCollector); - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); + public Map children(BlobPath path) throws IOException { + final var childrenBuilder = new HashMap(); final String keyPath = path.buildAsString(); - final EnumSet enumBlobListingDetails = EnumSet.of(BlobListingDetails.METADATA); - - SocketAccess.doPrivilegedVoidException(() -> { - for (ListBlobItem blobItem : blobContainer.listBlobs(keyPath, false, enumBlobListingDetails, null, context)) { - if (blobItem instanceof CloudBlobDirectory) { - final URI uri = blobItem.getUri(); - logger.trace(() -> new ParameterizedMessage("blob url [{}]", uri)); - // uri.getPath is of the form /container/keyPath.* and we want to strip off the /container/ - // this requires 1 + container.length() + 1, with each 1 corresponding to one of the /. - // Lastly, we add the length of keyPath to the offset to strip this container's path. 
- final String uriPath = uri.getPath(); - blobsBuilder.add(uriPath.substring(1 + container.length() + 1 + keyPath.length(), uriPath.length() - 1)); + + try { + final BlobServiceClient client = client(); + SocketAccess.doPrivilegedVoidException(() -> { + BlobContainerClient blobContainer = client.getBlobContainerClient(container); + final ListBlobsOptions listBlobsOptions = new ListBlobsOptions(); + listBlobsOptions.setPrefix(keyPath).setDetails(new BlobListDetails().setRetrieveMetadata(true)); + for (final BlobItem blobItem : blobContainer.listBlobsByHierarchy("/", listBlobsOptions, null)) { + Boolean isPrefix = blobItem.isPrefix(); + if (isPrefix != null && isPrefix) { + String directoryName = blobItem.getName(); + directoryName = directoryName.substring(keyPath.length()); + if (directoryName.isEmpty()) { + continue; + } + // Remove trailing slash + directoryName = directoryName.substring(0, directoryName.length() - 1); + childrenBuilder.put(directoryName, + new AzureBlobContainer(BlobPath.cleanPath().add(blobItem.getName()), this)); + } } - } - }); + }); + } catch (Exception e) { + throw new IOException("Unable to provide children blob containers for " + path, e); + } - return Collections.unmodifiableMap(blobsBuilder.stream().collect( - Collectors.toMap(Function.identity(), name -> new AzureBlobContainer(path.add(name), this, threadPool)))); + return Collections.unmodifiableMap(childrenBuilder); } - public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) - throws URISyntaxException, StorageException, IOException { + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { assert inputStream.markSupported() : "Should not be used with non-mark supporting streams as their retry handling in the SDK is broken"; logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {})", blobName, blobSize)); - final Tuple> client = client(); - final 
OperationContext operationContext = hookMetricCollector(client().v2().get(), uploadMetricsCollector); - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - final CloudBlockBlob blob = blobContainer.getBlockBlobReference(blobName); try { - final AccessCondition accessCondition = - failIfAlreadyExists ? AccessCondition.generateIfNotExistsCondition() : AccessCondition.generateEmptyCondition(); - SocketAccess.doPrivilegedVoidException(() -> - blob.upload(inputStream, blobSize, accessCondition, service.getBlobRequestOptionsForWriteBlob(), operationContext)); - } catch (final StorageException se) { - if (failIfAlreadyExists && se.getHttpStatusCode() == HttpURLConnection.HTTP_CONFLICT && - StorageErrorCodeStrings.BLOB_ALREADY_EXISTS.equals(se.getErrorCode())) { - throw new FileAlreadyExistsException(blobName, null, se.getMessage()); + final BlobServiceClient client = client(); + SocketAccess.doPrivilegedVoidException(() -> { + final BlobClient blob = client.getBlobContainerClient(container) + .getBlobClient(blobName); + + ParallelTransferOptions parallelTransferOptions = getParallelTransferOptions(); + BlobParallelUploadOptions blobParallelUploadOptions = + new BlobParallelUploadOptions(inputStream, blobSize) + .setParallelTransferOptions(parallelTransferOptions); + blob.uploadWithResponse(blobParallelUploadOptions, null, null); + }); + } catch (final BlobStorageException e) { + if (failIfAlreadyExists && e.getStatusCode() == HttpURLConnection.HTTP_CONFLICT && + BlobErrorCode.BLOB_ALREADY_EXISTS.equals(e.getErrorCode())) { + throw new FileAlreadyExistsException(blobName, null, e.getMessage()); } - throw se; + throw new IOException("Unable to write blob " + blobName, e); + } catch (Exception e) { + throw new IOException("Unable to write blob " + blobName, e); } + logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {}) - done", blobName, blobSize)); } - private Tuple> client() { - return service.client(clientName); + 
private ParallelTransferOptions getParallelTransferOptions() { + ParallelTransferOptions parallelTransferOptions = new ParallelTransferOptions(); + parallelTransferOptions.setBlockSizeLong(service.getUploadBlockSize()) + .setMaxSingleUploadSizeLong(service.getSizeThresholdForMultiBlockUpload()) + .setMaxConcurrency(service.getMaxUploadParallelism()); + return parallelTransferOptions; } - private OperationContext hookMetricCollector(OperationContext context, Consumer metricCollector) { - context.getRequestCompletedEventHandler().addListener(new StorageEvent<>() { - @Override - public void eventOccurred(RequestCompletedEvent eventArg) { - int statusCode = eventArg.getRequestResult().getStatusCode(); - if (statusCode < 300) { - metricCollector.accept((HttpURLConnection) eventArg.getConnectionObject()); - } - } - }); - return context; + private BlobServiceClient client() { + return getAzureBlobServiceClientClient().getSyncClient(); + } + + private BlobServiceAsyncClient asyncClient() { + return getAzureBlobServiceClientClient().getAsyncClient(); + } + + private AzureBlobServiceClient getAzureBlobServiceClientClient() { + return service.client(clientName, locationMode, statsConsumer); } @Override @@ -391,96 +453,142 @@ private Map toMap() { } } - /** - * Building our own input stream instead of using the SDK's {@link com.microsoft.azure.storage.blob.BlobInputStream} - * because that stream is highly inefficient in both memory and CPU use. - */ - private static class BlobInputStream extends InputStream { - - /** - * Maximum number of bytes to fetch per read request and thus to buffer on heap at a time. - * Set to 4M because that's what {@link com.microsoft.azure.storage.blob.BlobInputStream} uses. - */ - private static final int MAX_READ_CHUNK_SIZE = ByteSizeUnit.MB.toIntBytes(4); - - /** - * Using a {@link ByteArrayOutputStream} as a buffer instead of a byte array since the byte array APIs on the SDK are less - * efficient. 
- */ - private final ByteArrayOutputStream buffer; - - private final long limit; - - private final CloudBlockBlob blockBlobReference; + private static class AzureInputStream extends InputStream { + private final CancellableRateLimitedFluxIterator cancellableRateLimitedFluxIterator; + private ByteBuf byteBuf; + private boolean closed; + private final ByteBufAllocator allocator; + + private AzureInputStream(final BlobAsyncClient client, + long rangeOffset, + long rangeLength, + long contentLength, + int maxRetries, + ByteBufAllocator allocator) throws IOException { + rangeLength = Math.min(rangeLength, contentLength - rangeOffset); + final BlobRange range = new BlobRange(rangeOffset, rangeLength); + DownloadRetryOptions downloadRetryOptions = new DownloadRetryOptions() + .setMaxRetryRequests(maxRetries); + Flux byteBufFlux = + client.downloadWithResponse(range, downloadRetryOptions, null, false) + .flux() + .concatMap(ResponseBase::getValue) // it's important to use concatMap, since flatMap doesn't provide ordering + // guarantees and that's not fun to debug :( + .filter(Objects::nonNull) + .map(this::copyBuffer); // Sadly we have to copy the buffers since the memory is released after the flux execution + // ends and we need that the byte buffer outlives that lifecycle. Since the SDK provides an + // ByteBuffer instead of a ByteBuf we cannot just increase the ref count and release the + // memory later on. + this.allocator = allocator; + + // On the transport layer we read the recv buffer in 64kb chunks, but later on those buffers are + // split into 8kb chunks (see HttpObjectDecoder), so we request upstream the equivalent to 64kb. (i.e. 
8 elements per batch * + // 8kb) + this.cancellableRateLimitedFluxIterator = + new CancellableRateLimitedFluxIterator<>(8, ReferenceCountUtil::safeRelease); + // Read eagerly the first chunk so we can throw early if the + // blob doesn't exist + byteBufFlux.subscribe(cancellableRateLimitedFluxIterator); + getNextByteBuf(); + } - private final long start; + private ByteBuf copyBuffer(ByteBuffer buffer) { + ByteBuf byteBuf = allocator.heapBuffer(buffer.remaining(), buffer.remaining()); + byteBuf.writeBytes(buffer); + return byteBuf; + } - private final OperationContext context; + @Override + public int read() throws IOException { + byte[] b = new byte[1]; + return read(b, 0, 1); + } - // current read position on the byte array backing #buffer - private int pos; + @Override + public int read(byte[] b, int off, int len) throws IOException { + if (off < 0 || len < 0 || len > b.length - off) { + throw new IndexOutOfBoundsException(); + } - // current position up to which the contents of the blob where buffered - private long offset; + ByteBuf buffer = getNextByteBuf(); + if (buffer == null || buffer.readableBytes() == 0) { + releaseByteBuf(buffer); + return -1; + } - BlobInputStream(long limit, CloudBlockBlob blockBlobReference, long start, OperationContext context) { - this.limit = limit; - this.blockBlobReference = blockBlobReference; - this.start = start; - this.context = context; - buffer = new ByteArrayOutputStream(Math.min(MAX_READ_CHUNK_SIZE, Math.toIntExact(Math.min(limit, Integer.MAX_VALUE)))) { - @Override - public byte[] toByteArray() { - return buf; + int totalBytesRead = 0; + while (buffer != null && totalBytesRead < len) { + int toRead = Math.min(len - totalBytesRead, buffer.readableBytes()); + buffer.readBytes(b, off + totalBytesRead, toRead); + totalBytesRead += toRead; + if (buffer.readableBytes() == 0) { + releaseByteBuf(buffer); + buffer = getNextByteBuf(); } - }; - pos = 0; - offset = 0; + } + + return totalBytesRead; } @Override - public int read() 
throws IOException { - try { - fill(); - } catch (StorageException | URISyntaxException ex) { - throw new IOException(ex); - } - if (pos == buffer.size()) { - return -1; + public void close() { + if (closed == false) { + cancellableRateLimitedFluxIterator.cancel(); + closed = true; + releaseByteBuf(byteBuf); } - return buffer.toByteArray()[pos++]; } @Override - public int read(byte[] b, int off, int len) throws IOException { - try { - fill(); - } catch (StorageException | URISyntaxException ex) { - throw new IOException(ex); - } - final int buffered = buffer.size(); - int remaining = buffered - pos; - if (len > 0 && remaining == 0) { - return -1; - } - final int toRead = Math.min(remaining, len); - System.arraycopy(buffer.toByteArray(), pos, b, off, toRead); - pos += toRead; - return toRead; + public long skip(long n) { + throw new UnsupportedOperationException("skip is not supported"); } - void fill() throws StorageException, URISyntaxException { - if (pos == buffer.size()) { - final long toFill = Math.min(limit - this.offset, MAX_READ_CHUNK_SIZE); - if (toFill <= 0L) { - return; + private void releaseByteBuf(ByteBuf buf) { + ReferenceCountUtil.safeRelease(buf); + this.byteBuf = null; + } + + @Nullable + private ByteBuf getNextByteBuf() throws IOException { + try { + if (byteBuf == null && cancellableRateLimitedFluxIterator.hasNext() == false) { + return null; + } + + if (byteBuf != null) { + return byteBuf; } - buffer.reset(); - SocketAccess.doPrivilegedVoidException(() -> blockBlobReference.downloadRange( - start + this.offset, toFill, buffer, null, null, context)); - this.offset += buffer.size(); - pos = 0; + + byteBuf = cancellableRateLimitedFluxIterator.next(); + return byteBuf; + } catch (Exception e) { + throw new IOException("Unable to read blob", e.getCause()); } } } + + private static class RequestStatsCollector { + private final BiPredicate filter; + private final Runnable onHttpRequest; + + private RequestStatsCollector(BiPredicate filter, + Runnable 
onHttpRequest) { + this.filter = filter; + this.onHttpRequest = onHttpRequest; + } + + static RequestStatsCollector create(BiPredicate filter, + Runnable consumer) { + return new RequestStatsCollector(filter, consumer); + } + + private boolean shouldConsumeRequestInfo(String httpMethod, URL url) { + return filter.test(httpMethod, url); + } + + private void consumeHttpRequestInfo() { + onHttpRequest.run(); + } + } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java new file mode 100644 index 0000000000000..08e7c115f3129 --- /dev/null +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureClientProvider.java @@ -0,0 +1,294 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories.azure; + +import com.azure.core.http.HttpClient; +import com.azure.core.http.HttpMethod; +import com.azure.core.http.HttpPipelineCallContext; +import com.azure.core.http.HttpPipelineNextPolicy; +import com.azure.core.http.HttpRequest; +import com.azure.core.http.HttpResponse; +import com.azure.core.http.ProxyOptions; +import com.azure.core.http.netty.NettyAsyncHttpClientBuilder; +import com.azure.core.http.policy.HttpPipelinePolicy; +import com.azure.core.util.logging.ClientLogger; +import com.azure.storage.blob.BlobServiceAsyncClient; +import com.azure.storage.blob.BlobServiceClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.common.implementation.connectionstring.StorageConnectionString; +import com.azure.storage.common.policy.RequestRetryOptions; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.channel.ChannelOption; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.repositories.azure.executors.PrivilegedExecutor; +import org.elasticsearch.repositories.azure.executors.ReactorScheduledExecutorService; +import org.elasticsearch.threadpool.ThreadPool; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Scheduler; +import reactor.core.scheduler.Schedulers; +import reactor.netty.resources.ConnectionProvider; + +import java.io.IOException; +import java.net.URL; +import java.time.Duration; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadFactory; +import 
java.util.function.BiConsumer; + +import static org.elasticsearch.repositories.azure.AzureRepositoryPlugin.NETTY_EVENT_LOOP_THREAD_POOL_NAME; +import static org.elasticsearch.repositories.azure.AzureRepositoryPlugin.REPOSITORY_THREAD_POOL_NAME; + +class AzureClientProvider extends AbstractLifecycleComponent { + private static final TimeValue DEFAULT_CONNECTION_TIMEOUT = TimeValue.timeValueSeconds(30); + private static final TimeValue DEFAULT_MAX_CONNECTION_IDLE_TIME = TimeValue.timeValueSeconds(60); + private static final int DEFAULT_MAX_CONNECTIONS = 50; + private static final int DEFAULT_EVENT_LOOP_THREAD_COUNT = Math.min(Runtime.getRuntime().availableProcessors(), 8) * 2; + private static final int PENDING_CONNECTION_QUEUE_SIZE = -1; // see ConnectionProvider.ConnectionPoolSpec.pendingAcquireMaxCount + + static final Setting EVENT_LOOP_THREAD_COUNT = Setting.intSetting( + "repository.azure.http_client.event_loop_executor_thread_count", + DEFAULT_EVENT_LOOP_THREAD_COUNT, + 1, + Setting.Property.NodeScope); + + static final Setting MAX_OPEN_CONNECTIONS = Setting.intSetting( + "repository.azure.http_client.max_open_connections", + DEFAULT_MAX_CONNECTIONS, + 1, + Setting.Property.NodeScope); + + static final Setting OPEN_CONNECTION_TIMEOUT = Setting.timeSetting( + "repository.azure.http_client.connection_timeout", + DEFAULT_CONNECTION_TIMEOUT, + Setting.Property.NodeScope); + + static final Setting MAX_IDLE_TIME = Setting.timeSetting( + "repository.azure.http_client.connection_max_idle_time", + DEFAULT_MAX_CONNECTION_IDLE_TIME, + Setting.Property.NodeScope); + + private final ThreadPool threadPool; + private final String reactorExecutorName; + private final EventLoopGroup eventLoopGroup; + private final ConnectionProvider connectionProvider; + private final ByteBufAllocator byteBufAllocator; + private final ClientLogger clientLogger = new ClientLogger(AzureClientProvider.class); + private volatile boolean closed = false; + + AzureClientProvider(ThreadPool 
threadPool, + String reactorExecutorName, + EventLoopGroup eventLoopGroup, + ConnectionProvider connectionProvider, + ByteBufAllocator byteBufAllocator) { + this.threadPool = threadPool; + this.reactorExecutorName = reactorExecutorName; + this.eventLoopGroup = eventLoopGroup; + this.connectionProvider = connectionProvider; + this.byteBufAllocator = byteBufAllocator; + } + + static int eventLoopThreadsFromSettings(Settings settings) { + return EVENT_LOOP_THREAD_COUNT.get(settings); + } + + static AzureClientProvider create(ThreadPool threadPool, Settings settings) { + final ExecutorService eventLoopExecutor = threadPool.executor(NETTY_EVENT_LOOP_THREAD_POOL_NAME); + // Most of the code that needs special permissions (i.e. jackson serializers generation) is executed + // in the event loop executor. That's the reason why we should provide an executor that allows the + // execution of privileged code + final EventLoopGroup eventLoopGroup = new NioEventLoopGroup(eventLoopThreadsFromSettings(settings), + new PrivilegedExecutor(eventLoopExecutor)); + + final TimeValue openConnectionTimeout = OPEN_CONNECTION_TIMEOUT.get(settings); + final TimeValue maxIdleTime = MAX_IDLE_TIME.get(settings); + + ConnectionProvider provider = + ConnectionProvider.builder("azure-sdk-connection-pool") + .maxConnections(MAX_OPEN_CONNECTIONS.get(settings)) + .pendingAcquireMaxCount(PENDING_CONNECTION_QUEUE_SIZE) // This determines the max outstanding queued requests + .pendingAcquireTimeout(Duration.ofMillis(openConnectionTimeout.millis())) + .maxIdleTime(Duration.ofMillis(maxIdleTime.millis())) + .build(); + + ByteBufAllocator pooledByteBufAllocator = createByteBufAllocator(); + + // Just to verify that this executor exists + threadPool.executor(REPOSITORY_THREAD_POOL_NAME); + return new AzureClientProvider(threadPool, REPOSITORY_THREAD_POOL_NAME, eventLoopGroup, provider, pooledByteBufAllocator); + } + + private static ByteBufAllocator createByteBufAllocator() { + int nHeapArena = 
PooledByteBufAllocator.defaultNumHeapArena(); + int pageSize = PooledByteBufAllocator.defaultPageSize(); + int maxOrder = PooledByteBufAllocator.defaultMaxOrder(); + int tinyCacheSize = PooledByteBufAllocator.defaultTinyCacheSize(); + int smallCacheSize = PooledByteBufAllocator.defaultSmallCacheSize(); + int normalCacheSize = PooledByteBufAllocator.defaultNormalCacheSize(); + boolean useCacheForAllThreads = PooledByteBufAllocator.defaultUseCacheForAllThreads(); + + return new PooledByteBufAllocator(false, + nHeapArena, + 0, + pageSize, + maxOrder, + tinyCacheSize, + smallCacheSize, + normalCacheSize, + useCacheForAllThreads); + } + + AzureBlobServiceClient createClient(AzureStorageSettings settings, + LocationMode locationMode, + RequestRetryOptions retryOptions, + ProxyOptions proxyOptions, + BiConsumer successfulRequestConsumer) { + if (closed) { + throw new IllegalStateException("AzureClientProvider is already closed"); + } + + reactor.netty.http.client.HttpClient nettyHttpClient = reactor.netty.http.client.HttpClient.create(connectionProvider); + nettyHttpClient = nettyHttpClient + .port(80) + .wiretap(false); + + nettyHttpClient = nettyHttpClient.tcpConfiguration(tcpClient -> { + tcpClient = tcpClient.runOn(eventLoopGroup); + tcpClient = tcpClient.option(ChannelOption.ALLOCATOR, byteBufAllocator); + return tcpClient; + }); + + final HttpClient httpClient = new NettyAsyncHttpClientBuilder(nettyHttpClient) + .disableBufferCopy(true) + .proxy(proxyOptions) + .build(); + + final String connectionString = settings.getConnectString(); + + BlobServiceClientBuilder builder = new BlobServiceClientBuilder() + .connectionString(connectionString) + .httpClient(httpClient) + .retryOptions(retryOptions); + + if (successfulRequestConsumer != null) { + builder.addPolicy(new SuccessfulRequestTracker(successfulRequestConsumer)); + } + + if (locationMode.isSecondary()) { + // TODO: maybe extract this logic so we don't need to have a client logger around? 
+ StorageConnectionString storageConnectionString = StorageConnectionString.create(connectionString, clientLogger); + String secondaryUri = storageConnectionString.getBlobEndpoint().getSecondaryUri(); + if (secondaryUri == null) { + throw new IllegalArgumentException("Unable to configure an AzureClient using a secondary location without a secondary " + + "endpoint"); + } + + builder.endpoint(secondaryUri); + } + + BlobServiceClient blobServiceClient = SocketAccess.doPrivilegedException(builder::buildClient); + BlobServiceAsyncClient asyncClient = SocketAccess.doPrivilegedException(builder::buildAsyncClient); + return new AzureBlobServiceClient(blobServiceClient, asyncClient, settings.getMaxRetries(), byteBufAllocator); + } + + @Override + protected void doStart() { + ReactorScheduledExecutorService executorService = new ReactorScheduledExecutorService(threadPool, reactorExecutorName) { + @Override + protected Runnable decorateRunnable(Runnable command) { + return () -> SocketAccess.doPrivilegedVoidException(command::run); + } + + @Override + protected Callable decorateCallable(Callable callable) { + return () -> SocketAccess.doPrivilegedException(callable::call); + } + }; + + // The only way to configure the schedulers used by the SDK is to inject a new global factory. This is a bit ugly... + // See https://github.com/Azure/azure-sdk-for-java/issues/17272 for a feature request to avoid this need. 
+ Schedulers.setFactory(new Schedulers.Factory() { + @Override + public Scheduler newParallel(int parallelism, ThreadFactory threadFactory) { + return Schedulers.fromExecutor(executorService); + } + + @Override + public Scheduler newElastic(int ttlSeconds, ThreadFactory threadFactory) { + return Schedulers.fromExecutor(executorService); + } + + @Override + public Scheduler newBoundedElastic(int threadCap, int queuedTaskCap, ThreadFactory threadFactory, int ttlSeconds) { + return Schedulers.fromExecutor(executorService); + } + + @Override + public Scheduler newSingle(ThreadFactory threadFactory) { + return Schedulers.fromExecutor(executorService); + } + }); + } + + @Override + protected void doStop() { + closed = true; + connectionProvider.dispose(); + eventLoopGroup.shutdownGracefully(); + Schedulers.resetFactory(); + } + + @Override + protected void doClose() throws IOException {} + + private static final class SuccessfulRequestTracker implements HttpPipelinePolicy { + private final BiConsumer onSuccessfulRequest; + private final Logger logger = LogManager.getLogger(SuccessfulRequestTracker.class); + + private SuccessfulRequestTracker(BiConsumer onSuccessfulRequest) { + this.onSuccessfulRequest = onSuccessfulRequest; + } + + @Override + public Mono process(HttpPipelineCallContext context, HttpPipelineNextPolicy next) { + return next.process() + .doOnSuccess(httpResponse -> trackSuccessfulRequest(context.getHttpRequest(), httpResponse)); + } + + private void trackSuccessfulRequest(HttpRequest httpRequest, HttpResponse httpResponse) { + HttpMethod method = httpRequest.getHttpMethod(); + if (httpResponse != null && method != null && httpResponse.getStatusCode() > 199 && httpResponse.getStatusCode() <= 299) { + try { + onSuccessfulRequest.accept(method.name(), httpRequest.getUrl()); + } catch (Exception e) { + logger.warn("Unable to notify a successful request", e); + } + } + } + } +} diff --git 
a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 42f782c5d8276..b08b91cbf1a7f 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -19,7 +19,6 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.LocationMode; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; @@ -101,7 +100,7 @@ public AzureRepository( if (Repository.READONLY_SETTING.exists(metadata.settings())) { this.readonly = Repository.READONLY_SETTING.get(metadata.settings()); } else { - this.readonly = locationMode == LocationMode.SECONDARY_ONLY; + this.readonly = locationMode.isSecondary(); } } @@ -131,7 +130,7 @@ protected BlobStore getBlobStore() { @Override protected AzureBlobStore createBlobStore() { - final AzureBlobStore blobStore = new AzureBlobStore(metadata, storageService, threadPool); + final AzureBlobStore blobStore = new AzureBlobStore(metadata, storageService); logger.debug(() -> new ParameterizedMessage( "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java index 98191f6cb8c26..30ebf74b0207d 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -19,7 +19,12 @@ package org.elasticsearch.repositories.azure; +import 
com.azure.core.util.serializer.JacksonAdapter; +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; @@ -27,18 +32,25 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ScalingExecutorBuilder; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.function.Supplier; /** * A plugin to add a repository type that writes to and from the Azure cloud storage service. 
@@ -46,27 +58,56 @@ public class AzureRepositoryPlugin extends Plugin implements RepositoryPlugin, ReloadablePlugin { public static final String REPOSITORY_THREAD_POOL_NAME = "repository_azure"; + public static final String NETTY_EVENT_LOOP_THREAD_POOL_NAME = "azure_event_loop"; + + static { + // Trigger static initialization with the plugin class loader + // so we have access to the proper xml parser + JacksonAdapter.createDefaultSerializerAdapter(); + } // protected for testing - final AzureStorageService azureStoreService; + final SetOnce azureStoreService = new SetOnce<>(); + private final Settings settings; + private final Map initialClientSettings; public AzureRepositoryPlugin(Settings settings) { // eagerly load client settings so that secure settings are read - this.azureStoreService = createAzureStoreService(settings); - } - - // non-static, package private for testing - AzureStorageService createAzureStoreService(final Settings settings) { - return new AzureStorageService(settings); + this.initialClientSettings = AzureStorageSettings.load(settings); + this.settings = settings; } @Override public Map getRepositories(Environment env, NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, RecoverySettings recoverySettings) { - return Collections.singletonMap(AzureRepository.TYPE, - (metadata) -> new AzureRepository(metadata, namedXContentRegistry, azureStoreService, clusterService, bigArrays, - recoverySettings)); + return Collections.singletonMap(AzureRepository.TYPE, metadata -> { + AzureStorageService storageService = azureStoreService.get(); + assert storageService != null; + return new AzureRepository(metadata, namedXContentRegistry, storageService, clusterService, bigArrays, recoverySettings); + }); + } + + @Override + public Collection createComponents(Client client, + ClusterService clusterService, + ThreadPool threadPool, + ResourceWatcherService resourceWatcherService, + ScriptService scriptService, + 
NamedXContentRegistry xContentRegistry, + Environment environment, + NodeEnvironment nodeEnvironment, + NamedWriteableRegistry namedWriteableRegistry, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier repositoriesServiceSupplier) { + AzureClientProvider azureClientProvider = + AzureClientProvider.create(threadPool, settings); + azureStoreService.set(createAzureStorageService(settings, azureClientProvider)); + return List.of(azureClientProvider); + } + + AzureStorageService createAzureStorageService(Settings settings, AzureClientProvider azureClientProvider) { + return new AzureStorageService(settings, azureClientProvider); } @Override @@ -86,11 +127,16 @@ public List> getSettings() { @Override public List> getExecutorBuilders(Settings settings) { - return Collections.singletonList(executorBuilder()); + return List.of(executorBuilder(), nettyEventLoopExecutorBuilder(settings)); } public static ExecutorBuilder executorBuilder() { - return new ScalingExecutorBuilder(REPOSITORY_THREAD_POOL_NAME, 0, 32, TimeValue.timeValueSeconds(30L)); + return new ScalingExecutorBuilder(REPOSITORY_THREAD_POOL_NAME, 0, 5, TimeValue.timeValueSeconds(30L)); + } + + public static ExecutorBuilder nettyEventLoopExecutorBuilder(Settings settings) { + int eventLoopThreads = AzureClientProvider.eventLoopThreadsFromSettings(settings); + return new ScalingExecutorBuilder(NETTY_EVENT_LOOP_THREAD_POOL_NAME, 0, eventLoopThreads, TimeValue.timeValueSeconds(30L)); } @Override @@ -100,6 +146,8 @@ public void reload(Settings settings) { if (clientsSettings.isEmpty()) { throw new SettingsException("If you want to use an azure repository, you need to define a client configuration."); } - azureStoreService.refreshAndClearCache(clientsSettings); + AzureStorageService storageService = azureStoreService.get(); + assert storageService != null; + storageService.refreshSettings(clientsSettings); } } diff --git 
a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index bd048b2f9d3df..af789c12ca939 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -19,96 +19,164 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.CloudStorageAccount; -import com.microsoft.azure.storage.Constants; -import com.microsoft.azure.storage.OperationContext; -import com.microsoft.azure.storage.RetryPolicy; -import com.microsoft.azure.storage.RetryPolicyFactory; -import com.microsoft.azure.storage.RetryExponentialRetry; -import com.microsoft.azure.storage.blob.BlobRequestOptions; -import com.microsoft.azure.storage.blob.CloudBlobClient; -import org.elasticsearch.common.collect.Tuple; +import com.azure.core.http.ProxyOptions; +import com.azure.core.util.logging.ClientLogger; +import com.azure.storage.common.implementation.connectionstring.StorageConnectionString; +import com.azure.storage.common.policy.RequestRetryOptions; +import com.azure.storage.common.policy.RetryPolicyType; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; -import java.net.URI; -import java.net.URISyntaxException; -import java.security.InvalidKeyException; +import java.net.InetSocketAddress; +import java.net.Proxy; +import java.net.URL; import java.util.Map; -import java.util.function.Supplier; +import java.util.function.BiConsumer; +import static com.azure.storage.blob.BlobAsyncClient.BLOB_DEFAULT_NUMBER_OF_BUFFERS; +import static 
com.azure.storage.blob.BlobClient.BLOB_DEFAULT_UPLOAD_BLOCK_SIZE; import static java.util.Collections.emptyMap; public class AzureStorageService { - public static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES); + /** + * The maximum size of a BlockBlob block. + * See https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs + */ + public static ByteSizeValue MAX_BLOCK_SIZE = new ByteSizeValue(100, ByteSizeUnit.MB); + + /** + * The maximum number of blocks. + * See https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs + */ + public static final long MAX_BLOCK_NUMBER = 50000; + + /** + * The maximum size of a Block Blob. + * See https://docs.microsoft.com/en-us/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs + */ + public static final long MAX_BLOB_SIZE = MAX_BLOCK_NUMBER * MAX_BLOCK_SIZE.getBytes(); + /** * Maximum allowed blob size in Azure blob store. 
*/ - public static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(Constants.MAX_BLOB_SIZE, ByteSizeUnit.BYTES); + public static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(MAX_BLOB_SIZE , ByteSizeUnit.BYTES); + + // see ModelHelper.BLOB_DEFAULT_MAX_SINGLE_UPLOAD_SIZE + private static final long DEFAULT_MAX_SINGLE_UPLOAD_SIZE = new ByteSizeValue(256, ByteSizeUnit.MB).getBytes(); + private static final long DEFAULT_UPLOAD_BLOCK_SIZE = BLOB_DEFAULT_UPLOAD_BLOCK_SIZE; + private static final int DEFAULT_MAX_PARALLELISM = BLOB_DEFAULT_NUMBER_OF_BUFFERS; // 'package' for testing volatile Map storageSettings = emptyMap(); + private final AzureClientProvider azureClientProvider; + private final ClientLogger clientLogger = new ClientLogger(AzureStorageService.class); - public AzureStorageService(Settings settings) { + public AzureStorageService(Settings settings, AzureClientProvider azureClientProvider) { // eagerly load client settings so that secure settings are read final Map clientsSettings = AzureStorageSettings.load(settings); - refreshAndClearCache(clientsSettings); + refreshSettings(clientsSettings); + this.azureClientProvider = azureClientProvider; } - /** - * Creates a {@code CloudBlobClient} on each invocation using the current client - * settings. CloudBlobClient is not thread safe and the settings can change, - * therefore the instance is not cache-able and should only be reused inside a - * thread for logically coupled ops. The {@code OperationContext} is used to - * specify the proxy, but a new context is *required* for each call. 
- */ - public Tuple> client(String clientName) { + public AzureBlobServiceClient client(String clientName, LocationMode locationMode) { + return client(clientName, locationMode, null); + } + + public AzureBlobServiceClient client(String clientName, LocationMode locationMode, BiConsumer successfulRequestConsumer) { + final AzureStorageSettings azureStorageSettings = getClientSettings(clientName); + + RequestRetryOptions retryOptions = getRetryOptions(locationMode, azureStorageSettings); + ProxyOptions proxyOptions = getProxyOptions(azureStorageSettings); + return azureClientProvider.createClient(azureStorageSettings, locationMode, retryOptions, proxyOptions, successfulRequestConsumer); + } + + private AzureStorageSettings getClientSettings(String clientName) { final AzureStorageSettings azureStorageSettings = this.storageSettings.get(clientName); if (azureStorageSettings == null) { throw new SettingsException("Unable to find client with name [" + clientName + "]"); } - try { - return new Tuple<>(buildClient(azureStorageSettings), () -> buildOperationContext(azureStorageSettings)); - } catch (InvalidKeyException | URISyntaxException | IllegalArgumentException e) { - throw new SettingsException("Invalid azure client settings with name [" + clientName + "]", e); - } + return azureStorageSettings; } - private CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { - final CloudBlobClient client = createClient(azureStorageSettings); - // Set timeout option if the user sets cloud.azure.storage.timeout or - // cloud.azure.storage.xxx.timeout (it's negative by default) - final long timeout = azureStorageSettings.getTimeout().getMillis(); - if (timeout > 0) { - if (timeout > Integer.MAX_VALUE) { - throw new IllegalArgumentException("Timeout [" + azureStorageSettings.getTimeout() + "] exceeds 2,147,483,647ms."); - } - client.getDefaultRequestOptions().setTimeoutIntervalInMs((int) timeout); + private static 
ProxyOptions getProxyOptions(AzureStorageSettings settings) { + Proxy proxy = settings.getProxy(); + if (proxy == null) { + return null; } - // We define a default exponential retry policy - client.getDefaultRequestOptions().setRetryPolicyFactory(createRetryPolicy(azureStorageSettings)); - client.getDefaultRequestOptions().setLocationMode(azureStorageSettings.getLocationMode()); - return client; + + switch (proxy.type()) { + case HTTP: + return new ProxyOptions(ProxyOptions.Type.HTTP, (InetSocketAddress) proxy.address()); + case SOCKS: + return new ProxyOptions(ProxyOptions.Type.SOCKS5, (InetSocketAddress) proxy.address()); + default: + return null; + } + } + + // non-static, package private for testing + long getUploadBlockSize() { + return DEFAULT_UPLOAD_BLOCK_SIZE; } // non-static, package private for testing - RetryPolicyFactory createRetryPolicy(final AzureStorageSettings azureStorageSettings) { - return new RetryExponentialRetry(RetryPolicy.DEFAULT_CLIENT_BACKOFF, azureStorageSettings.getMaxRetries()); + long getSizeThresholdForMultiBlockUpload() { + return DEFAULT_MAX_SINGLE_UPLOAD_SIZE; } - private static CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { - final String connectionString = azureStorageSettings.getConnectString(); - return CloudStorageAccount.parse(connectionString).createCloudBlobClient(); + // non-static, package private for testing + int getMaxUploadParallelism() { + return DEFAULT_MAX_PARALLELISM; } - private static OperationContext buildOperationContext(AzureStorageSettings azureStorageSettings) { - final OperationContext context = new OperationContext(); - context.setProxy(azureStorageSettings.getProxy()); - return context; + int getMaxReadRetries(String clientName) { + AzureStorageSettings azureStorageSettings = getClientSettings(clientName); + return azureStorageSettings.getMaxRetries(); + } + + // non-static, package private for testing + RequestRetryOptions 
getRetryOptions(LocationMode locationMode, AzureStorageSettings azureStorageSettings) { + String connectString = azureStorageSettings.getConnectString(); + StorageConnectionString storageConnectionString = StorageConnectionString.create(connectString, clientLogger); + String primaryUri = storageConnectionString.getBlobEndpoint().getPrimaryUri(); + String secondaryUri = storageConnectionString.getBlobEndpoint().getSecondaryUri(); + + if (locationMode == LocationMode.PRIMARY_THEN_SECONDARY && secondaryUri == null) { + throw new IllegalArgumentException("Unable to use " + locationMode + " location mode without a secondary location URI"); + } + + final String secondaryHost; + switch (locationMode) { + case PRIMARY_ONLY: + case SECONDARY_ONLY: + secondaryHost = null; + break; + case PRIMARY_THEN_SECONDARY: + secondaryHost = secondaryUri; + break; + case SECONDARY_THEN_PRIMARY: + secondaryHost = primaryUri; + break; + default: + assert false; + throw new AssertionError("Impossible to get here"); + } + + // The request retry policy uses seconds as the default time unit, since + // it's possible to configure a timeout < 1s we should ceil that value + // as RequestRetryOptions expects a value >= 1. + // See https://github.com/Azure/azure-sdk-for-java/issues/17590 for a proposal + // to fix this issue. + TimeValue configuredTimeout = azureStorageSettings.getTimeout(); + int timeout = configuredTimeout.duration() == -1 ? Integer.MAX_VALUE : Math.max(1, Math.toIntExact(configuredTimeout.getSeconds())); + return new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, + azureStorageSettings.getMaxRetries(), timeout, + null, null, secondaryHost); } /** @@ -116,35 +184,9 @@ private static OperationContext buildOperationContext(AzureStorageSettings azure * client requests will use the new refreshed settings. 
* * @param clientsSettings the settings for new clients - * @return the old settings */ - public Map refreshAndClearCache(Map clientsSettings) { - final Map prevSettings = this.storageSettings; + public void refreshSettings(Map clientsSettings) { this.storageSettings = Map.copyOf(clientsSettings); - // clients are built lazily by {@link client(String)} - return prevSettings; - } - - /** - * Extract the blob name from a URI like https://myservice.azure.net/container/path/to/myfile - * It should remove the container part (first part of the path) and gives path/to/myfile - * @param uri URI to parse - * @return The blob name relative to the container - */ - static String blobNameFromUri(URI uri) { - final String path = uri.getPath(); - // We remove the container name from the path - // The 3 magic number cames from the fact if path is /container/path/to/myfile - // First occurrence is empty "/" - // Second occurrence is "container - // Last part contains "path/to/myfile" which is what we want to get - final String[] splits = path.split("/", 3); - // We return the remaining end of the string - return splits[2]; - } - - // package private for testing - BlobRequestOptions getBlobRequestOptionsForWriteBlob() { - return null; + // clients are built lazily by {@link client(String, LocationMode)} } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java index 380eef87c4172..e6fca2d9e9b86 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java @@ -19,8 +19,6 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.LocationMode; -import com.microsoft.azure.storage.RetryPolicy; import org.elasticsearch.common.Nullable; import 
org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureSetting; @@ -43,6 +41,8 @@ final class AzureStorageSettings { + public static final int DEFAULT_MAX_RETRIES = 3; + // prefix for azure client settings private static final String AZURE_CLIENT_PREFIX_KEY = "azure.client."; @@ -58,10 +58,10 @@ final class AzureStorageSettings { public static final AffixSetting SAS_TOKEN_SETTING = Setting.affixKeySetting(AZURE_CLIENT_PREFIX_KEY, "sas_token", key -> SecureSetting.secureString(key, null)); - /** max_retries: Number of retries in case of Azure errors. Defaults to 3 (RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT). */ + /** max_retries: Number of retries in case of Azure errors. Defaults to 3 (RequestRetryOptions). */ public static final AffixSetting MAX_RETRIES_SETTING = Setting.affixKeySetting(AZURE_CLIENT_PREFIX_KEY, "max_retries", - (key) -> Setting.intSetting(key, RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT, Setting.Property.NodeScope), + (key) -> Setting.intSetting(key, DEFAULT_MAX_RETRIES, Setting.Property.NodeScope), () -> ACCOUNT_SETTING, () -> KEY_SETTING); /** * Azure endpoint suffix. Default to core.windows.net (CloudStorageAccount.DEFAULT_DNS). 
@@ -97,19 +97,6 @@ final class AzureStorageSettings { private final TimeValue timeout; private final int maxRetries; private final Proxy proxy; - private final LocationMode locationMode; - - // copy-constructor - private AzureStorageSettings(String account, String connectString, String endpointSuffix, TimeValue timeout, int maxRetries, - Proxy proxy, LocationMode locationMode) { - this.account = account; - this.connectString = connectString; - this.endpointSuffix = endpointSuffix; - this.timeout = timeout; - this.maxRetries = maxRetries; - this.proxy = proxy; - this.locationMode = locationMode; - } private AzureStorageSettings(String account, String key, String sasToken, String endpointSuffix, TimeValue timeout, int maxRetries, Proxy.Type proxyType, String proxyHost, Integer proxyPort) { @@ -136,7 +123,6 @@ private AzureStorageSettings(String account, String key, String sasToken, String throw new SettingsException("Azure proxy host is unknown.", e); } } - this.locationMode = LocationMode.PRIMARY_ONLY; } public String getEndpointSuffix() { @@ -181,9 +167,6 @@ private static String buildConnectString(String account, @Nullable String key, @ return connectionStringBuilder.toString(); } - public LocationMode getLocationMode() { - return locationMode; - } @Override public String toString() { @@ -193,7 +176,6 @@ public String toString() { sb.append(", endpointSuffix='").append(endpointSuffix).append('\''); sb.append(", maxRetries=").append(maxRetries); sb.append(", proxy=").append(proxy); - sb.append(", locationMode='").append(locationMode).append('\''); sb.append('}'); return sb.toString(); } @@ -246,15 +228,4 @@ private static T getValue(Settings settings, String groupName, Setting se final String fullKey = k.toConcreteKey(groupName).toString(); return setting.getConcreteSetting(fullKey).get(settings); } - - static Map overrideLocationMode(Map clientsSettings, - LocationMode locationMode) { - final var map = new HashMap(); - for (final Map.Entry entry : 
clientsSettings.entrySet()) { - map.put(entry.getKey(), - new AzureStorageSettings(entry.getValue().account, entry.getValue().connectString, entry.getValue().endpointSuffix, - entry.getValue().timeout, entry.getValue().maxRetries, entry.getValue().proxy, locationMode)); - } - return Map.copyOf(map); - } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/CancellableRateLimitedFluxIterator.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/CancellableRateLimitedFluxIterator.java new file mode 100644 index 0000000000000..30f47b361e785 --- /dev/null +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/CancellableRateLimitedFluxIterator.java @@ -0,0 +1,238 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories.azure; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; + +import java.util.Iterator; +import java.util.NoSuchElementException; +import java.util.Queue; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; + +/** + * An iterator that allows subscribing to a reactive publisher and requesting more elements + * in batches as the iterator is consumed so a slow consumer is not overwhelmed by a fast + * producer. Additionally it provides the ability to cancel the subscription before the entire + * flux is consumed, for these cases it is possible to provide a cleaner function that would be + * invoked for all the elements that weren't consumed before the cancellation. (i.e. it's + * possible to free the memory allocated for a byte buffer). 
+ */ +class CancellableRateLimitedFluxIterator implements Subscriber, Iterator { + private static final Subscription CANCELLED_SUBSCRIPTION = new Subscription() { + @Override + public void request(long n) { + // no op + } + + @Override + public void cancel() { + // no op + } + }; + + private final int elementsPerBatch; + private final Queue queue; + private final Lock lock; + private final Condition condition; + private final Consumer cleaner; + private final AtomicReference subscription = new AtomicReference<>(); + private final Logger logger = LogManager.getLogger(CancellableRateLimitedFluxIterator.class); + private volatile Throwable error; + private volatile boolean done; + private int emittedElements; + + /** + * Creates a new CancellableRateLimitedFluxIterator that would request to its upstream publisher + * in batches as specified in {@code elementsPerBatch}. Additionally, it's possible to provide a + * function that would be invoked after cancellation for possibly outstanding elements that won't be + * consumed downstream but need to be cleaned in any case. + * @param elementsPerBatch the number of elements to request upstream + * @param cleaner the function that would be used to clean unused elements + */ + CancellableRateLimitedFluxIterator(int elementsPerBatch, Consumer cleaner) { + this.elementsPerBatch = elementsPerBatch; + this.queue = new ArrayBlockingQueue<>(elementsPerBatch); + this.lock = new ReentrantLock(); + this.condition = lock.newCondition(); + this.cleaner = cleaner; + } + + @Override + public boolean hasNext() { + // This method acts as a barrier between producers and consumers + // and it's possible that the consumer thread is blocked + // waiting until the producer emits an element. 
+ for (; ; ) { + boolean isDone = done; + boolean isQueueEmpty = queue.isEmpty(); + + if (isDone) { + Throwable e = error; + if (e != null) { + throw new RuntimeException(e); + } else if (isQueueEmpty) { + return false; + } + } + + if (isQueueEmpty == false) { + return true; + } + + // Provide visibility guarantees for the modified queue + lock.lock(); + try { + while (done == false && queue.isEmpty()) { + condition.await(); + } + } catch (InterruptedException e) { + cancelSubscription(); + throw new RuntimeException(e); + } finally { + lock.unlock(); + } + } + } + + @Override + public T next() { + // We block here until the producer has emitted an element. + if (hasNext() == false) { + throw new NoSuchElementException(); + } + + T nextElement = queue.poll(); + + if (nextElement == null) { + cancelSubscription(); + signalConsumer(); + + throw new IllegalStateException("Queue is empty: Expected one element to be available from the Reactive Streams source."); + } + + int totalEmittedElements = emittedElements + 1; + if (totalEmittedElements == elementsPerBatch) { + emittedElements = 0; + subscription.get().request(totalEmittedElements); + } + else { + emittedElements = totalEmittedElements; + } + + return nextElement; + } + + @Override + public void onSubscribe(Subscription s) { + if (subscription.compareAndSet(null, s)) { + s.request(elementsPerBatch); + } else { + s.cancel(); + } + } + + @Override + public void onNext(T element) { + // It's possible that we receive more elements after cancelling the subscription + // since it might have outstanding requests before the cancellation. In that case + // we just clean the resources. + if (done) { + cleanElement(element); + return; + } + + if (queue.offer(element) == false) { + // If the source doesn't respect backpressure, we might lose elements, + // in that case we cancel the subscription and mark this consumer as failed + // cleaning possibly non-consumed outstanding elements. 
+ cancelSubscription(); + onError(new RuntimeException("Queue is full: Reactive Streams source doesn't respect backpressure")); + } + signalConsumer(); + } + + public void cancel() { + cancelSubscription(); + clearQueue(); + done = true; + // cancel should be called from the consumer + // thread, but to avoid potential deadlocks + // we just try to release a possibly blocked + // consumer + signalConsumer(); + } + + @Override + public void onError(Throwable t) { + clearQueue(); + error = t; + done = true; + signalConsumer(); + } + + @Override + public void onComplete() { + done = true; + signalConsumer(); + } + + // visible for testing + Queue getQueue() { + return queue; + } + + private void signalConsumer() { + lock.lock(); + try { + condition.signalAll(); + } finally { + lock.unlock(); + } + } + + private void clearQueue() { + T element; + while ((element = queue.poll()) != null) { + cleanElement(element); + } + } + + private void cleanElement(T element) { + try { + cleaner.accept(element); + } catch (Exception e) { + logger.warn(new ParameterizedMessage("Unable to clean unused element"), e); + } + } + + private void cancelSubscription() { + Subscription previousSubscription = subscription.getAndSet(CANCELLED_SUBSCRIPTION); + previousSubscription.cancel(); + } +} diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/LocationMode.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/LocationMode.java new file mode 100644 index 0000000000000..2955f2dca0fce --- /dev/null +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/LocationMode.java @@ -0,0 +1,31 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.azure; + +enum LocationMode { + PRIMARY_ONLY, + SECONDARY_ONLY, + PRIMARY_THEN_SECONDARY, + SECONDARY_THEN_PRIMARY; + + boolean isSecondary() { + return this == SECONDARY_ONLY || this == SECONDARY_THEN_PRIMARY; + } +} diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java index 18acf088cdb32..a096fe80ce1a0 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java @@ -19,7 +19,6 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.StorageException; import org.apache.logging.log4j.core.util.Throwables; import org.elasticsearch.SpecialPermission; @@ -40,7 +39,7 @@ public final class SocketAccess { private SocketAccess() {} - public static T doPrivilegedIOException(PrivilegedExceptionAction operation) throws IOException { + public static T doPrivilegedException(PrivilegedExceptionAction operation) { SpecialPermission.check(); try { return AccessController.doPrivileged(operation); @@ -51,18 +50,7 @@ public static T doPrivilegedIOException(PrivilegedExceptionAction operati } } - public static T 
doPrivilegedException(PrivilegedExceptionAction operation) throws StorageException { - SpecialPermission.check(); - try { - return AccessController.doPrivileged(operation); - } catch (PrivilegedActionException e) { - Throwables.rethrow(e.getCause()); - assert false : "always throws"; - return null; - } - } - - public static void doPrivilegedVoidException(StorageRunnable action) throws StorageException, URISyntaxException { + public static void doPrivilegedVoidException(StorageRunnable action) { SpecialPermission.check(); try { AccessController.doPrivileged((PrivilegedExceptionAction) () -> { @@ -76,7 +64,7 @@ public static void doPrivilegedVoidException(StorageRunnable action) throws Stor @FunctionalInterface public interface StorageRunnable { - void executeCouldThrow() throws StorageException, URISyntaxException, IOException; + void executeCouldThrow() throws URISyntaxException, IOException; } } diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/executors/PrivilegedExecutor.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/executors/PrivilegedExecutor.java new file mode 100644 index 0000000000000..a149097352e5c --- /dev/null +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/executors/PrivilegedExecutor.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.azure.executors; + +import org.elasticsearch.repositories.azure.SocketAccess; + +import java.util.concurrent.Executor; + +/** + * Executor that grants security permissions to the tasks executed on it. + */ +public class PrivilegedExecutor implements Executor { + private final Executor delegate; + + public PrivilegedExecutor(Executor delegate) { + this.delegate = delegate; + } + + @Override + public void execute(Runnable command) { + delegate.execute(() -> SocketAccess.doPrivilegedVoidException(command::run)); + } +} diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/executors/ReactorScheduledExecutorService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/executors/ReactorScheduledExecutorService.java new file mode 100644 index 0000000000000..0e4905a6de84f --- /dev/null +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/executors/ReactorScheduledExecutorService.java @@ -0,0 +1,188 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.azure.executors; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.threadpool.Scheduler; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.AbstractExecutorService; +import java.util.concurrent.Callable; +import java.util.concurrent.Delayed; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; + +/** + * Wrapper around {@link ThreadPool} that provides the necessary scheduling methods for a {@link reactor.core.scheduler.Scheduler} to + * function. This allows injecting a custom Executor to the reactor schedulers factory and get fine grained control over the + * thread resources used. 
+ */ +@SuppressForbidden(reason = "It wraps a ThreadPool and delegates all the work") +public class ReactorScheduledExecutorService extends AbstractExecutorService implements ScheduledExecutorService { + private final ThreadPool threadPool; + private final String executorName; + private final ExecutorService delegate; + private final Logger logger = LogManager.getLogger(ReactorScheduledExecutorService.class); + + public ReactorScheduledExecutorService(ThreadPool threadPool, String executorName) { + this.threadPool = threadPool; + this.executorName = executorName; + this.delegate = threadPool.executor(executorName); + } + + @Override + public ScheduledFuture schedule(Callable callable, long delay, TimeUnit unit) { + Scheduler.ScheduledCancellable schedule = threadPool.schedule(() -> { + try { + decorateCallable(callable).call(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }, new TimeValue(delay, unit), executorName); + + return new ReactorFuture<>(schedule); + } + + public ScheduledFuture schedule(Runnable command, long delay, TimeUnit unit) { + Runnable decoratedCommand = decorateRunnable(command); + Scheduler.ScheduledCancellable schedule = threadPool.schedule(decoratedCommand, new TimeValue(delay, unit), executorName); + return new ReactorFuture<>(schedule); + } + + @Override + public ScheduledFuture scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) { + Runnable decoratedCommand = decorateRunnable(command); + + return threadPool.scheduler().scheduleAtFixedRate(() -> { + try { + delegate.execute(decoratedCommand); + } catch (EsRejectedExecutionException e) { + if (e.isExecutorShutdown()) { + logger.debug(new ParameterizedMessage( + "could not schedule execution of [{}] on [{}] as executor is shut down", + decoratedCommand, executorName), e); + } else { + throw e; + } + } + }, initialDelay, period, unit); + } + + @Override + public ScheduledFuture scheduleWithFixedDelay(Runnable command, long initialDelay, 
long delay, TimeUnit unit) { + Runnable decorateRunnable = decorateRunnable(command); + + Scheduler.Cancellable cancellable = threadPool.scheduleWithFixedDelay(decorateRunnable, + new TimeValue(delay, unit), + executorName); + + return new ReactorFuture<>(cancellable); + } + + @Override + public void shutdown() { + // No-op + } + + @Override + public List shutdownNow() { + return Collections.emptyList(); + } + + @Override + public boolean isShutdown() { + return delegate.isShutdown(); + } + + @Override + public boolean isTerminated() { + return delegate.isTerminated(); + } + + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { + return delegate.awaitTermination(timeout, unit); + } + + @Override + public void execute(Runnable command) { + delegate.execute(decorateRunnable(command)); + } + + protected Runnable decorateRunnable(Runnable command) { + return command; + } + + protected Callable decorateCallable(Callable callable) { + return callable; + } + + private static final class ReactorFuture implements ScheduledFuture { + private final Scheduler.Cancellable cancellable; + + private ReactorFuture(Scheduler.Cancellable cancellable) { + this.cancellable = cancellable; + } + + @Override + public long getDelay(TimeUnit unit) { + throw new UnsupportedOperationException(); + } + + @Override + public int compareTo(Delayed o) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return cancellable.cancel(); + } + + @Override + public boolean isCancelled() { + return cancellable.isCancelled(); + } + + @Override + public boolean isDone() { + return cancellable.isCancelled(); + } + + @Override + public V get() { + throw new UnsupportedOperationException(); + } + + @Override + public V get(long timeout, TimeUnit unit) { + throw new UnsupportedOperationException(); + } + } +} diff --git 
a/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy index 0e2572a63156d..46267313d58c7 100644 --- a/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/repository-azure/src/main/plugin-metadata/plugin-security.policy @@ -20,5 +20,9 @@ grant { // azure client opens socket connections for to access repository permission java.net.SocketPermission "*", "connect"; - permission java.lang.RuntimePermission "setFactory"; + // io.netty.util.concurrent.GlobalEventExecutor.startThread + permission java.lang.RuntimePermission "setContextClassLoader"; + // Used by jackson bean deserialization + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; }; diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java index af623217c2287..b6ec9fe8d1c9b 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -18,17 +18,16 @@ */ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.Constants; -import com.microsoft.azure.storage.RetryExponentialRetry; -import com.microsoft.azure.storage.RetryPolicyFactory; -import com.microsoft.azure.storage.blob.BlobRequestOptions; +import com.azure.storage.common.policy.RequestRetryOptions; +import com.azure.storage.common.policy.RetryPolicyType; import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; import fixture.azure.AzureHttpHandler; -import org.apache.http.HttpStatus; 
import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.bytes.BytesReference; @@ -76,6 +75,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.elasticsearch.repositories.azure.AzureRepository.Repository.CONTAINER_SETTING; +import static org.elasticsearch.repositories.azure.AzureRepository.Repository.LOCATION_MODE_SETTING; import static org.elasticsearch.repositories.azure.AzureStorageSettings.ACCOUNT_SETTING; import static org.elasticsearch.repositories.azure.AzureStorageSettings.ENDPOINT_SUFFIX_SETTING; import static org.elasticsearch.repositories.azure.AzureStorageSettings.KEY_SETTING; @@ -98,30 +98,47 @@ public class AzureBlobContainerRetriesTests extends ESTestCase { private static final long MAX_RANGE_VAL = Long.MAX_VALUE - 1L; private HttpServer httpServer; + private HttpServer secondaryHttpServer; private ThreadPool threadPool; + private AzureClientProvider clientProvider; @Before public void setUp() throws Exception { - threadPool = new TestThreadPool(getTestClass().getName(), AzureRepositoryPlugin.executorBuilder()); + threadPool = new TestThreadPool(getTestClass().getName(), + AzureRepositoryPlugin.executorBuilder(), + AzureRepositoryPlugin.nettyEventLoopExecutorBuilder(Settings.EMPTY) + ); httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); httpServer.start(); + secondaryHttpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); + secondaryHttpServer.start(); + clientProvider = AzureClientProvider.create(threadPool, Settings.EMPTY); + clientProvider.start(); super.setUp(); } @After public void tearDown() throws Exception { + clientProvider.close(); 
httpServer.stop(0); + secondaryHttpServer.stop(0); super.tearDown(); ThreadPool.terminate(threadPool, 10L, TimeUnit.SECONDS); } private BlobContainer createBlobContainer(final int maxRetries) { + return createBlobContainer(maxRetries, null, LocationMode.PRIMARY_ONLY); + } + + private BlobContainer createBlobContainer(final int maxRetries, String secondaryHost, final LocationMode locationMode) { final Settings.Builder clientSettings = Settings.builder(); final String clientName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); - final InetSocketAddress address = httpServer.getAddress(); - final String endpoint = "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://" - + InetAddresses.toUriString(address.getAddress()) + ":" + address.getPort(); + String endpoint = + "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=" + getEndpointForServer(httpServer, "account"); + if (secondaryHost != null) { + endpoint += ";BlobSecondaryEndpoint=" + getEndpointForServer(secondaryHttpServer, "account"); + } clientSettings.put(ENDPOINT_SUFFIX_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint); clientSettings.put(MAX_RETRIES_SETTING.getConcreteSettingForNamespace(clientName).getKey(), maxRetries); clientSettings.put(TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), TimeValue.timeValueMillis(500)); @@ -132,17 +149,38 @@ private BlobContainer createBlobContainer(final int maxRetries) { secureSettings.setString(KEY_SETTING.getConcreteSettingForNamespace(clientName).getKey(), key); clientSettings.setSecureSettings(secureSettings); - final AzureStorageService service = new AzureStorageService(clientSettings.build()) { + final AzureStorageService service = new AzureStorageService(clientSettings.build(), clientProvider) { @Override - RetryPolicyFactory createRetryPolicy(final AzureStorageSettings azureStorageSettings) { - return new RetryExponentialRetry(1, 10, 100, azureStorageSettings.getMaxRetries()); + RequestRetryOptions 
getRetryOptions(LocationMode locationMode, AzureStorageSettings azureStorageSettings) { + return new RequestRetryOptions(RetryPolicyType.EXPONENTIAL, + maxRetries + 1, + 1, + 1L, + 5L, + // The SDK doesn't work well with ip endponts. Secondary host endpoints that contain + // a path causes the sdk to rewrite the endpoint with an invalid path, that's the reason why we provide just the host + + // port. + secondaryHost != null ? secondaryHost.replaceFirst("/account", "") : null); } @Override - BlobRequestOptions getBlobRequestOptionsForWriteBlob() { - BlobRequestOptions options = new BlobRequestOptions(); - options.setSingleBlobPutThresholdInBytes(Math.toIntExact(ByteSizeUnit.MB.toBytes(1))); - return options; + long getUploadBlockSize() { + return ByteSizeUnit.MB.toBytes(1); + } + + @Override + long getSizeThresholdForMultiBlockUpload() { + return ByteSizeUnit.MB.toBytes(1); + } + + @Override + int getMaxUploadParallelism() { + return 1; + } + + @Override + int getMaxReadRetries(String clientName) { + return maxRetries; } }; @@ -150,9 +188,10 @@ BlobRequestOptions getBlobRequestOptionsForWriteBlob() { Settings.builder() .put(CONTAINER_SETTING.getKey(), "container") .put(ACCOUNT_SETTING.getKey(), clientName) + .put(LOCATION_MODE_SETTING.getKey(), locationMode) .build()); - return new AzureBlobContainer(BlobPath.cleanPath(), new AzureBlobStore(repositoryMetadata, service, threadPool), threadPool); + return new AzureBlobContainer(BlobPath.cleanPath(), new AzureBlobStore(repositoryMetadata, service)); } public void testReadNonexistentBlobThrowsNoSuchFileException() { @@ -167,7 +206,7 @@ public void testReadNonexistentBlobThrowsNoSuchFileException() { blobContainer.readBlob("read_nonexistent_blob", position, length); } }); - assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("not found")); + assertThat(exception.toString(), exception.getMessage().toLowerCase(Locale.ROOT), containsString("not found")); } public void testReadBlobWithRetries() throws 
Exception { @@ -175,13 +214,14 @@ public void testReadBlobWithRetries() throws Exception { final CountDown countDownHead = new CountDown(maxRetries); final CountDown countDownGet = new CountDown(maxRetries); final byte[] bytes = randomBlobContent(); - httpServer.createContext("/container/read_blob_max_retries", exchange -> { + httpServer.createContext("/account/container/read_blob_max_retries", exchange -> { try { Streams.readFully(exchange.getRequestBody()); if ("HEAD".equals(exchange.getRequestMethod())) { if (countDownHead.countDown()) { exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(bytes.length)); + exchange.getResponseHeaders().add("Content-Length", String.valueOf(bytes.length)); exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1); return; @@ -193,7 +233,9 @@ public void testReadBlobWithRetries() throws Exception { final int length = bytes.length - rangeStart; exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length)); + exchange.getResponseHeaders().add("Content-Length", String.valueOf(length)); exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); + exchange.getResponseHeaders().add("ETag", UUIDs.base64UUID()); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length); exchange.getResponseBody().write(bytes, rangeStart, length); return; @@ -219,7 +261,7 @@ public void testReadRangeBlobWithRetries() throws Exception { final int maxRetries = randomIntBetween(1, 5); final CountDown countDownGet = new CountDown(maxRetries); final byte[] bytes = randomBlobContent(); - httpServer.createContext("/container/read_range_blob_max_retries", exchange -> { + httpServer.createContext("/account/container/read_range_blob_max_retries", exchange -> { try { 
Streams.readFully(exchange.getRequestBody()); if ("HEAD".equals(exchange.getRequestMethod())) { @@ -234,9 +276,13 @@ public void testReadRangeBlobWithRetries() throws Exception { final int length = (rangeEnd.get() - rangeStart) + 1; assertThat(length, lessThanOrEqualTo(bytes.length - rangeStart)); exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); + exchange.getResponseHeaders().add("Content-Range", + "bytes " + rangeStart + "-" + (rangeStart + rangeEnd.get() + 1) + "/" + bytes.length); exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length)); + exchange.getResponseHeaders().add("Content-Length", String.valueOf(length)); exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); - exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length); + exchange.getResponseHeaders().add("ETag", UUIDs.base64UUID()); + exchange.sendResponseHeaders(RestStatus.PARTIAL_CONTENT.getStatus(), length); exchange.getResponseBody().write(bytes, rangeStart, length); return; } @@ -260,15 +306,16 @@ public void testReadRangeBlobWithRetries() throws Exception { } public void testWriteBlobWithRetries() throws Exception { - final int maxRetries = randomIntBetween(1, 5); + final int maxRetries = randomIntBetween(1, 5); final CountDown countDown = new CountDown(maxRetries); final byte[] bytes = randomBlobContent(); - httpServer.createContext("/container/write_blob_max_retries", exchange -> { + httpServer.createContext("/account/container/write_blob_max_retries", exchange -> { if ("PUT".equals(exchange.getRequestMethod())) { if (countDown.countDown()) { final BytesReference body = Streams.readFully(exchange.getRequestBody()); if (Objects.deepEquals(bytes, BytesReference.toBytes(body))) { + exchange.getResponseHeaders().add("x-ms-request-server-encrypted", "false"); exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1); } else { AzureHttpHandler.sendError(exchange, RestStatus.BAD_REQUEST); @@ -297,17 +344,17 @@ public 
void testWriteBlobWithRetries() throws Exception { } public void testWriteLargeBlob() throws Exception { - final int maxRetries = randomIntBetween(1, 5); + final int maxRetries = randomIntBetween(2, 5); - final int nbBlocks = randomIntBetween(1, 2); - final byte[] data = randomBytes(Constants.DEFAULT_STREAM_WRITE_IN_BYTES * nbBlocks); + final byte[] data = randomBytes((int) ByteSizeUnit.MB.toBytes(10)); + int nbBlocks = (int) Math.ceil((double) data.length / (double) ByteSizeUnit.MB.toBytes(1)); final int nbErrors = 2; // we want all requests to fail at least once final AtomicInteger countDownUploads = new AtomicInteger(nbErrors * nbBlocks); final CountDown countDownComplete = new CountDown(nbErrors); final Map blocks = new ConcurrentHashMap<>(); - httpServer.createContext("/container/write_large_blob", exchange -> { + httpServer.createContext("/account/container/write_large_blob", exchange -> { if ("PUT".equals(exchange.getRequestMethod())) { final Map params = new HashMap<>(); @@ -336,6 +383,7 @@ public void testWriteLargeBlob() throws Exception { block.writeTo(blob); } assertArrayEquals(data, blob.toByteArray()); + exchange.getResponseHeaders().add("x-ms-request-server-encrypted", "false"); exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1); exchange.close(); return; @@ -350,9 +398,11 @@ public void testWriteLargeBlob() throws Exception { }); final BlobContainer blobContainer = createBlobContainer(maxRetries); + try (InputStream stream = new InputStreamIndexInput(new ByteArrayIndexInput("desc", data), data.length)) { - blobContainer.writeBlob("write_large_blob", stream, data.length * nbBlocks, false); + blobContainer.writeBlob("write_large_blob", stream, data.length, false); } + assertThat(countDownUploads.get(), equalTo(0)); assertThat(countDownComplete.isCountedDown(), is(true)); assertThat(blocks.isEmpty(), is(true)); @@ -360,7 +410,7 @@ public void testWriteLargeBlob() throws Exception { public void testRetryUntilFail() throws IOException { 
final AtomicBoolean requestReceived = new AtomicBoolean(false); - httpServer.createContext("/container/write_blob_max_retries", exchange -> { + httpServer.createContext("/account/container/write_blob_max_retries", exchange -> { try { if (requestReceived.compareAndSet(false, true)) { throw new AssertionError("Should not receive two requests"); @@ -374,7 +424,6 @@ public void testRetryUntilFail() throws IOException { final BlobContainer blobContainer = createBlobContainer(randomIntBetween(2, 5)); try (InputStream stream = new InputStream() { - @Override public int read() throws IOException { throw new IOException("foo"); @@ -387,17 +436,88 @@ public boolean markSupported() { @Override public void reset() { - throw new AssertionError("should not be called"); } }) { final IOException ioe = expectThrows(IOException.class, () -> blobContainer.writeBlob("write_blob_max_retries", stream, randomIntBetween(1, 128), randomBoolean())); - assertThat(ioe.getMessage(), is("foo")); + assertThat(ioe.getMessage(), is("Unable to write blob write_blob_max_retries")); + } + } + + public void testRetryFromSecondaryLocationPolicies() throws Exception { + final int maxRetries = randomIntBetween(1, 5); + final AtomicInteger failedHeadCalls = new AtomicInteger(); + final AtomicInteger failedGetCalls = new AtomicInteger(); + final byte[] bytes = randomBlobContent(); + + HttpHandler failingHandler = exchange -> { + try { + Streams.readFully(exchange.getRequestBody()); + if ("HEAD".equals(exchange.getRequestMethod())) { + failedHeadCalls.incrementAndGet(); + AzureHttpHandler.sendError(exchange, randomFrom(RestStatus.INTERNAL_SERVER_ERROR, RestStatus.SERVICE_UNAVAILABLE)); + } else if ("GET".equals(exchange.getRequestMethod())) { + failedGetCalls.incrementAndGet(); + AzureHttpHandler.sendError(exchange, randomFrom(RestStatus.INTERNAL_SERVER_ERROR, RestStatus.SERVICE_UNAVAILABLE)); + } + } finally { + exchange.close(); + } + }; + + HttpHandler workingHandler = exchange -> { + try { + 
Streams.readFully(exchange.getRequestBody()); + if ("HEAD".equals(exchange.getRequestMethod())) { + exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); + exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(bytes.length)); + exchange.getResponseHeaders().add("Content-Length", String.valueOf(bytes.length)); + exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1); + } else if ("GET".equals(exchange.getRequestMethod())) { + final int rangeStart = getRangeStart(exchange); + assertThat(rangeStart, lessThan(bytes.length)); + final int length = bytes.length - rangeStart; + exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); + exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length)); + exchange.getResponseHeaders().add("Content-Length", String.valueOf(length)); + exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); + exchange.getResponseHeaders().add("ETag", UUIDs.base64UUID()); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length); + exchange.getResponseBody().write(bytes, rangeStart, length); + } + } finally { + exchange.close(); + } + }; + LocationMode locationMode = randomFrom(LocationMode.PRIMARY_THEN_SECONDARY, LocationMode.SECONDARY_THEN_PRIMARY); + + String secondaryHost = null; + String blobPath = "/account/container/read_blob_from_secondary"; + if (locationMode == LocationMode.PRIMARY_THEN_SECONDARY) { + httpServer.createContext(blobPath, failingHandler); + secondaryHttpServer.createContext(blobPath, workingHandler); + // The SDK doesn't work well with secondary host endpoints that contain + // a path, that's the reason why we sould provide just the host + port; + secondaryHost = getEndpointForServer(secondaryHttpServer, "account"); + } else if (locationMode == LocationMode.SECONDARY_THEN_PRIMARY) { + secondaryHttpServer.createContext(blobPath, failingHandler); 
+ httpServer.createContext(blobPath, workingHandler); + secondaryHost = getEndpointForServer(httpServer, "account"); + } + + final BlobContainer blobContainer = createBlobContainer(maxRetries, secondaryHost, locationMode); + try (InputStream inputStream = blobContainer.readBlob("read_blob_from_secondary")) { + assertArrayEquals(bytes, BytesReference.toBytes(Streams.readFully(inputStream))); + + // It does round robin, first tries on the primary, then on the secondary + assertThat(failedHeadCalls.get(), equalTo(1)); + assertThat(failedGetCalls.get(), equalTo(1)); } } private static byte[] randomBlobContent() { - return randomByteArrayOfLength(randomIntBetween(1, frequently() ? 512 : 1 << 20)); // rarely up to 1mb + return randomByteArrayOfLength(randomIntBetween(1, 1 << 20)); // rarely up to 1mb } private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$"); @@ -428,28 +548,8 @@ private static Optional getRangeEnd(HttpExchange exchange) { return Optional.of(Math.toIntExact(rangeEnd)); } - private static void sendIncompleteContent(HttpExchange exchange, byte[] bytes) throws IOException { - final int rangeStart = getRangeStart(exchange); - assertThat(rangeStart, lessThan(bytes.length)); - final Optional rangeEnd = getRangeEnd(exchange); - final int length; - if (rangeEnd.isPresent()) { - // adapt range end to be compliant to https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 - final int effectiveRangeEnd = Math.min(rangeEnd.get(), bytes.length - 1); - length = effectiveRangeEnd - rangeStart; - } else { - length = bytes.length - rangeStart - 1; - } - exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); - exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length)); - exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); - exchange.sendResponseHeaders(HttpStatus.SC_OK, length); - final int bytesToSend = randomIntBetween(0, length - 1); - if (bytesToSend > 0) { - 
exchange.getResponseBody().write(bytes, rangeStart, bytesToSend); - } - if (randomBoolean()) { - exchange.getResponseBody().flush(); - } + private String getEndpointForServer(HttpServer server, String accountName) { + InetSocketAddress address = server.getAddress(); + return "http://" + InetAddresses.toUriString(address.getAddress()) + ":" + address.getPort() + "/" + accountName; } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureClientProviderTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureClientProviderTests.java new file mode 100644 index 0000000000000..ea5095ff59db4 --- /dev/null +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureClientProviderTests.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.repositories.azure; + +import com.azure.storage.common.policy.RequestRetryOptions; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; + +public class AzureClientProviderTests extends ESTestCase { + private static final BiConsumer EMPTY_CONSUMER = (method, url) -> { }; + + private ThreadPool threadPool; + private AzureClientProvider azureClientProvider; + + @Before + public void setUpThreadPool() { + threadPool = new TestThreadPool(getTestName(), + AzureRepositoryPlugin.executorBuilder(), + AzureRepositoryPlugin.nettyEventLoopExecutorBuilder(Settings.EMPTY) + ); + azureClientProvider = AzureClientProvider.create(threadPool, Settings.EMPTY); + } + + @After + public void tearDownThreadPool() { + azureClientProvider.close(); + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + + public void testCanCreateAClientWithSecondaryLocation() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("azure.client.azure1.account", "myaccount1"); + secureSettings.setString("azure.client.azure1.key", encodeKey("mykey1")); + + final String endpoint; + if (randomBoolean()) { + endpoint = "ignored;BlobEndpoint=https://myaccount1.blob.core.windows.net;" + + "BlobSecondaryEndpoint=https://myaccount1-secondary.blob.core.windows.net"; + } else { + endpoint = "core.windows.net"; + } + + final Settings settings = Settings.builder() + .setSecureSettings(secureSettings) + .put("azure.client.azure1.endpoint_suffix", endpoint) + .build(); + + Map clientSettings = 
AzureStorageSettings.load(settings); + AzureStorageSettings storageSettings = clientSettings.get("azure1"); + assertNotNull(storageSettings); + + LocationMode locationMode = LocationMode.SECONDARY_ONLY; + RequestRetryOptions requestRetryOptions = new RequestRetryOptions(); + azureClientProvider.createClient(storageSettings, locationMode, requestRetryOptions, null, EMPTY_CONSUMER); + } + + public void testCanNotCreateAClientWithSecondaryLocationWithoutAProperEndpoint() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("azure.client.azure1.account", "myaccount1"); + secureSettings.setString("azure.client.azure1.key", encodeKey("mykey1")); + + final String endpoint = "ignored;BlobEndpoint=https://myaccount1.blob.core.windows.net"; + + final Settings settings = Settings.builder() + .setSecureSettings(secureSettings) + .put("azure.client.azure1.endpoint_suffix", endpoint) + .build(); + + Map clientSettings = AzureStorageSettings.load(settings); + AzureStorageSettings storageSettings = clientSettings.get("azure1"); + assertNotNull(storageSettings); + + LocationMode locationMode = LocationMode.SECONDARY_ONLY; + RequestRetryOptions requestRetryOptions = new RequestRetryOptions(); + expectThrows(IllegalArgumentException.class, () -> { + azureClientProvider.createClient(storageSettings, locationMode, requestRetryOptions, null, EMPTY_CONSUMER); + }); + } + + private static String encodeKey(final String value) { + return Base64.getEncoder().encodeToString(value.getBytes(StandardCharsets.UTF_8)); + } +} diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java index de2abe91d35bb..0c475a5b48515 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java +++ 
b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.LocationMode; import org.elasticsearch.cluster.metadata.RepositoryMetadata; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java index ba7231851148f..f0af2875df27d 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java @@ -19,37 +19,50 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.RetryExponentialRetry; -import com.microsoft.azure.storage.blob.CloudBlobClient; -import com.microsoft.azure.storage.core.Base64; +import com.azure.storage.common.policy.RequestRetryOptions; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; import java.io.IOException; import java.io.UncheckedIOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Proxy; -import java.net.URI; -import java.net.URISyntaxException; import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.util.Collections; import java.util.Map; -import static 
org.elasticsearch.repositories.azure.AzureStorageService.blobNameFromUri; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.emptyString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class AzureStorageServiceTests extends ESTestCase { + private ThreadPool threadPool; + + @Before + public void setUpThreadPool() { + threadPool = new TestThreadPool(AzureStorageServiceTests.class.getName(), + AzureRepositoryPlugin.executorBuilder(), + AzureRepositoryPlugin.nettyEventLoopExecutorBuilder(Settings.EMPTY) + ); + } + + @After + public void tearDownThreadPool() { + terminate(threadPool); + } public void testReadSecuredSettings() { final Settings settings = Settings.builder().setSecureSettings(buildSecureSettings()) @@ -66,12 +79,23 @@ public void testReadSecuredSettings() { private AzureRepositoryPlugin pluginWithSettingsValidation(Settings settings) { final AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings); new SettingsModule(settings, plugin.getSettings(), Collections.emptyList(), Collections.emptySet()); + plugin.createComponents(null, + null, + threadPool, + null, + null, + null, + null, + null, + null, + null, + null); return plugin; } private AzureStorageService storageServiceWithSettingsValidation(Settings settings) { try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { - return plugin.azureStoreService; + return plugin.azureStoreService.get(); } catch (IOException e) { throw new UncheckedIOException(e); } @@ -81,11 +105,12 @@ public void testCreateClientWithEndpointSuffix() throws IOException { final Settings settings = Settings.builder().setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.endpoint_suffix", "my_endpoint_suffix").build(); try (AzureRepositoryPlugin plugin = 
pluginWithSettingsValidation(settings)) { - final AzureStorageService azureStorageService = plugin.azureStoreService; - final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); - assertThat(client1.getEndpoint().toString(), equalTo("https://myaccount1.blob.my_endpoint_suffix")); - final CloudBlobClient client2 = azureStorageService.client("azure2").v1(); - assertThat(client2.getEndpoint().toString(), equalTo("https://myaccount2.blob.core.windows.net")); + final AzureStorageService azureStorageService = plugin.azureStoreService.get(); + AzureBlobServiceClient client1 = azureStorageService.client("azure1", LocationMode.PRIMARY_ONLY); + assertThat(client1.getSyncClient().getAccountUrl(), equalTo("https://myaccount1.blob.my_endpoint_suffix")); + + AzureBlobServiceClient client2 = azureStorageService.client("azure2", LocationMode.PRIMARY_ONLY); + assertThat(client2.getSyncClient().getAccountUrl(), equalTo("https://myaccount2.blob.core.windows.net")); } } @@ -103,29 +128,40 @@ public void testReinitClientSettings() throws IOException { secureSettings2.setString("azure.client.azure3.key", encodeKey("mykey23")); final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings1)) { - final AzureStorageService azureStorageService = plugin.azureStoreService; - final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); - assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount11.blob.core.windows.net")); - final CloudBlobClient client12 = azureStorageService.client("azure2").v1(); - assertThat(client12.getEndpoint().toString(), equalTo("https://myaccount12.blob.core.windows.net")); + final AzureStorageService azureStorageService = plugin.azureStoreService.get(); + + AzureBlobServiceClient client11 = azureStorageService.client("azure1", LocationMode.PRIMARY_ONLY); + assertThat(client11.getSyncClient().getAccountUrl(), 
equalTo("https://myaccount11.blob.core.windows.net")); + + AzureBlobServiceClient client12 = azureStorageService.client("azure2", LocationMode.PRIMARY_ONLY); + assertThat(client12.getSyncClient().getAccountUrl(), equalTo("https://myaccount12.blob.core.windows.net")); + // client 3 is missing - final SettingsException e1 = expectThrows(SettingsException.class, () -> azureStorageService.client("azure3")); + final SettingsException e1 = expectThrows(SettingsException.class, + () -> azureStorageService.client("azure3", LocationMode.PRIMARY_ONLY)); assertThat(e1.getMessage(), is("Unable to find client with name [azure3]")); + // update client settings plugin.reload(settings2); + // old client 1 not changed - assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount11.blob.core.windows.net")); + assertThat(client11.getSyncClient().getAccountUrl(), equalTo("https://myaccount11.blob.core.windows.net")); + // new client 1 is changed - final CloudBlobClient client21 = azureStorageService.client("azure1").v1(); - assertThat(client21.getEndpoint().toString(), equalTo("https://myaccount21.blob.core.windows.net")); + AzureBlobServiceClient client21 = azureStorageService.client("azure1", LocationMode.PRIMARY_ONLY); + assertThat(client21.getSyncClient().getAccountUrl(), equalTo("https://myaccount21.blob.core.windows.net")); + // old client 2 not changed - assertThat(client12.getEndpoint().toString(), equalTo("https://myaccount12.blob.core.windows.net")); + assertThat(client12.getSyncClient().getAccountUrl(), equalTo("https://myaccount12.blob.core.windows.net")); + // new client2 is gone - final SettingsException e2 = expectThrows(SettingsException.class, () -> azureStorageService.client("azure2")); + final SettingsException e2 = expectThrows(SettingsException.class, + () -> azureStorageService.client("azure2", LocationMode.PRIMARY_ONLY)); assertThat(e2.getMessage(), is("Unable to find client with name [azure2]")); + // client 3 emerged - final CloudBlobClient 
client23 = azureStorageService.client("azure3").v1(); - assertThat(client23.getEndpoint().toString(), equalTo("https://myaccount23.blob.core.windows.net")); + AzureBlobServiceClient client23 = azureStorageService.client("azure3", LocationMode.PRIMARY_ONLY); + assertThat(client23.getSyncClient().getAccountUrl(), equalTo("https://myaccount23.blob.core.windows.net")); } } @@ -135,17 +171,17 @@ public void testReinitClientEmptySettings() throws IOException { secureSettings.setString("azure.client.azure1.key", encodeKey("mykey11")); final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { - final AzureStorageService azureStorageService = plugin.azureStoreService; - final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); - assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); + final AzureStorageService azureStorageService = plugin.azureStoreService.get(); + AzureBlobServiceClient client11 = azureStorageService.client("azure1", LocationMode.PRIMARY_ONLY); + assertThat(client11.getSyncClient().getAccountUrl(), equalTo("https://myaccount1.blob.core.windows.net")); // reinit with empty settings final SettingsException e = expectThrows(SettingsException.class, () -> plugin.reload(Settings.EMPTY)); assertThat(e.getMessage(), is("If you want to use an azure repository, you need to define a client configuration.")); // existing client untouched - assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); + assertThat(client11.getSyncClient().getAccountUrl(), equalTo("https://myaccount1.blob.core.windows.net")); // new client also untouched - final CloudBlobClient client21 = azureStorageService.client("azure1").v1(); - assertThat(client21.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); + AzureBlobServiceClient client21 = 
azureStorageService.client("azure1", LocationMode.PRIMARY_ONLY); + assertThat(client21.getSyncClient().getAccountUrl(), equalTo("https://myaccount1.blob.core.windows.net")); } } @@ -164,61 +200,18 @@ public void testReinitClientWrongSettings() throws IOException { secureSettings3.setString("azure.client.azure1.sas_token", encodeKey("mysasToken33")); final Settings settings3 = Settings.builder().setSecureSettings(secureSettings3).build(); try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings1)) { - final AzureStorageService azureStorageService = plugin.azureStoreService; - final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); - assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); + final AzureStorageService azureStorageService = plugin.azureStoreService.get(); + AzureBlobServiceClient client11 = azureStorageService.client("azure1", LocationMode.PRIMARY_ONLY); + assertThat(client11.getSyncClient().getAccountUrl(), equalTo("https://myaccount1.blob.core.windows.net")); final SettingsException e1 = expectThrows(SettingsException.class, () -> plugin.reload(settings2)); assertThat(e1.getMessage(), is("Neither a secret key nor a shared access token was set.")); final SettingsException e2 = expectThrows(SettingsException.class, () -> plugin.reload(settings3)); assertThat(e2.getMessage(), is("Both a secret as well as a shared access token were set.")); // existing client untouched - assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); + assertThat(client11.getSyncClient().getAccountUrl(), equalTo("https://myaccount1.blob.core.windows.net")); } } - public void testGetSelectedClientNonExisting() { - final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(buildSettings()); - final SettingsException e = expectThrows(SettingsException.class, () -> azureStorageService.client("azure4")); - assertThat(e.getMessage(), 
is("Unable to find client with name [azure4]")); - } - - public void testGetSelectedClientDefaultTimeout() { - final Settings timeoutSettings = Settings.builder() - .setSecureSettings(buildSecureSettings()) - .put("azure.client.azure3.timeout", "30s") - .build(); - final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(timeoutSettings); - final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); - assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), nullValue()); - final CloudBlobClient client3 = azureStorageService.client("azure3").v1(); - assertThat(client3.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(30 * 1000)); - } - - public void testGetSelectedClientNoTimeout() { - final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(buildSettings()); - final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); - assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(nullValue())); - } - - public void testGetSelectedClientBackoffPolicy() { - final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(buildSettings()); - final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); - assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); - assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); - } - - public void testGetSelectedClientBackoffPolicyNbRetries() { - final Settings timeoutSettings = Settings.builder() - .setSecureSettings(buildSecureSettings()) - .put("azure.client.azure1.max_retries", 7) - .build(); - - final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(timeoutSettings); - final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); - assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); - 
assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); - } - public void testNoProxy() { final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) @@ -328,17 +321,130 @@ public void testProxyWrongHost() { assertEquals("Azure proxy host is unknown.", e.getMessage()); } - public void testBlobNameFromUri() throws URISyntaxException { - String name = blobNameFromUri(new URI("https://myservice.azure.net/container/path/to/myfile")); - assertThat(name, is("path/to/myfile")); - name = blobNameFromUri(new URI("http://myservice.azure.net/container/path/to/myfile")); - assertThat(name, is("path/to/myfile")); - name = blobNameFromUri(new URI("http://127.0.0.1/container/path/to/myfile")); - assertThat(name, is("path/to/myfile")); - name = blobNameFromUri(new URI("https://127.0.0.1/container/path/to/myfile")); - assertThat(name, is("path/to/myfile")); + public void testDefaultTimeOut() throws Exception { + final Settings settings = Settings.builder() + .setSecureSettings(buildSecureSettings()) + .build(); + + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { + final AzureStorageService azureStorageService = plugin.azureStoreService.get(); + AzureStorageSettings azureStorageSettings = azureStorageService.storageSettings.get("azure1"); + RequestRetryOptions retryOptions = + azureStorageService.getRetryOptions(LocationMode.PRIMARY_ONLY, azureStorageSettings); + assertThat(retryOptions.getTryTimeout(), equalTo(Integer.MAX_VALUE)); + } } + public void testMillisecondsTimeOutIsRoundedUp() throws Exception { + final Settings settings = Settings.builder() + .setSecureSettings(buildSecureSettings()) + .put("azure.client.azure1.timeout", "200ms") + .build(); + + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { + final AzureStorageService azureStorageService = plugin.azureStoreService.get(); + AzureStorageSettings azureStorageSettings = 
azureStorageService.storageSettings.get("azure1"); + RequestRetryOptions retryOptions = + azureStorageService.getRetryOptions(LocationMode.PRIMARY_ONLY, azureStorageSettings); + assertThat(retryOptions.getTryTimeout(), equalTo(1)); + } + } + + public void testTimeoutConfiguration() throws Exception { + final Settings settings = Settings.builder() + .setSecureSettings(buildSecureSettings()) + .put("azure.client.azure1.timeout", "200s") + .build(); + + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { + final AzureStorageService azureStorageService = plugin.azureStoreService.get(); + AzureStorageSettings azureStorageSettings = azureStorageService.storageSettings.get("azure1"); + RequestRetryOptions retryOptions = + azureStorageService.getRetryOptions(LocationMode.PRIMARY_ONLY, azureStorageSettings); + assertThat(retryOptions.getTryTimeout(), equalTo(200)); + } + } + + public void testRetryConfigurationForSecondaryFallbackLocationMode() throws Exception { + final String endpoint; + if (randomBoolean()) { + endpoint = "core.windows.net"; + } else { + endpoint = "ignored;BlobEndpoint=https://myaccount1.blob.core.windows.net;" + + "BlobSecondaryEndpoint=https://myaccount1-secondary.blob.core.windows.net"; + } + + final Settings settings = Settings.builder() + .setSecureSettings(buildSecureSettings()) + .put("azure.client.azure1.endpoint_suffix", endpoint) + .build(); + + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { + final AzureStorageService azureStorageService = plugin.azureStoreService.get(); + AzureStorageSettings azureStorageSettings = azureStorageService.storageSettings.get("azure1"); + RequestRetryOptions retryOptions = + azureStorageService.getRetryOptions(LocationMode.PRIMARY_THEN_SECONDARY, azureStorageSettings); + assertThat(retryOptions.getSecondaryHost(), equalTo("https://myaccount1-secondary.blob.core.windows.net")); + } + } + + public void testRetryConfigurationForPrimaryFallbackLocationMode() 
throws Exception { + final String endpoint; + if (randomBoolean()) { + endpoint = "core.windows.net"; + } else { + endpoint = "ignored;BlobEndpoint=https://myaccount1.blob.core.windows.net;" + + "BlobSecondaryEndpoint=https://myaccount1-secondary.blob.core.windows.net"; + } + + final Settings settings = Settings.builder() + .setSecureSettings(buildSecureSettings()) + .put("azure.client.azure1.endpoint_suffix", endpoint) + .build(); + + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { + final AzureStorageService azureStorageService = plugin.azureStoreService.get(); + AzureStorageSettings azureStorageSettings = azureStorageService.storageSettings.get("azure1"); + RequestRetryOptions retryOptions = + azureStorageService.getRetryOptions(LocationMode.SECONDARY_THEN_PRIMARY, azureStorageSettings); + assertThat(retryOptions.getSecondaryHost(), equalTo("https://myaccount1.blob.core.windows.net")); + } + } + + public void testRetryConfigurationForLocationModeWithoutFallback() throws Exception { + final Settings settings = Settings.builder() + .setSecureSettings(buildSecureSettings()) + .put("azure.client.azure1.endpoint_suffix", "core.windows.net") + .build(); + + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { + final AzureStorageService azureStorageService = plugin.azureStoreService.get(); + AzureStorageSettings azureStorageSettings = azureStorageService.storageSettings.get("azure1"); + LocationMode locationMode = randomFrom(LocationMode.PRIMARY_ONLY, LocationMode.SECONDARY_ONLY); + RequestRetryOptions retryOptions = + azureStorageService.getRetryOptions(locationMode, azureStorageSettings); + + assertThat(retryOptions.getSecondaryHost(), equalTo(null)); + } + } + + public void testInvalidSettingsRetryConfigurationForLocationModeWithSecondaryFallback() throws Exception { + final String endpoint = "ignored;BlobEndpoint=https://myaccount1.blob.core.windows.net"; + final Settings settings = Settings.builder() + 
.setSecureSettings(buildSecureSettings()) + .put("azure.client.azure1.endpoint_suffix", endpoint) + .build(); + + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { + final AzureStorageService azureStorageService = plugin.azureStoreService.get(); + AzureStorageSettings azureStorageSettings = azureStorageService.storageSettings.get("azure1"); + + expectThrows(IllegalArgumentException.class, + () -> azureStorageService.getRetryOptions(LocationMode.PRIMARY_THEN_SECONDARY, azureStorageSettings)); + } + } + + private static MockSecureSettings buildSecureSettings() { final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("azure.client.azure1.account", "myaccount1"); @@ -350,11 +456,7 @@ private static MockSecureSettings buildSecureSettings() { return secureSettings; } - private static Settings buildSettings() { - return Settings.builder().setSecureSettings(buildSecureSettings()).build(); - } - private static String encodeKey(final String value) { - return Base64.encode(value.getBytes(StandardCharsets.UTF_8)); + return Base64.getEncoder().encodeToString(value.getBytes(StandardCharsets.UTF_8)); } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/CancellableRateLimitedFluxIteratorTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/CancellableRateLimitedFluxIteratorTests.java new file mode 100644 index 0000000000000..8e86470f93d60 --- /dev/null +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/CancellableRateLimitedFluxIteratorTests.java @@ -0,0 +1,263 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.azure; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscription; +import reactor.core.publisher.Flux; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class CancellableRateLimitedFluxIteratorTests extends ESTestCase { + private ThreadPool threadPool; + + @Before + public void createThreadPool() { + threadPool = new TestThreadPool(getTestName()); + } + + @After + public void tearDownThreadPool() { + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + + public void testConsumeAllElements() { + Set cleanedElements = new HashSet<>(); + CancellableRateLimitedFluxIterator iterator = + new CancellableRateLimitedFluxIterator<>(2, cleanedElements::add); + + List numbers = randomList(randomIntBetween(1, 20), ESTestCase::randomInt); + Flux.fromIterable(numbers) 
+ .subscribe(iterator); + + int consumedElements = 0; + while (iterator.hasNext()) { + Integer number = numbers.get(consumedElements++); + Integer next = iterator.next(); + assertThat(next, equalTo(number)); + } + + assertThat(consumedElements, equalTo(numbers.size())); + assertThat(cleanedElements, is(empty())); + assertThat(iterator.getQueue(), is(empty())); + iterator.cancel(); + } + + public void testItRequestsUpstreamInBatches() { + final int elementsPerBatch = randomIntBetween(4, 10); + final Set cleanedElements = new HashSet<>(); + final CancellableRateLimitedFluxIterator iterator = + new CancellableRateLimitedFluxIterator<>(elementsPerBatch, cleanedElements::add); + + final int providedElements = randomIntBetween(0, elementsPerBatch - 1); + Publisher publisher = s -> runOnNewThread(() -> { + s.onSubscribe(new Subscription() { + @Override + public void request(long n) { + assertThat(n, equalTo((long) elementsPerBatch)); + // Provide less elements than requested and complete + for (int i = 0; i < providedElements; i++) { + s.onNext(i); + } + s.onComplete(); + } + + @Override + public void cancel() { + + } + }); + }); + publisher.subscribe(iterator); + + final List consumedElements = new ArrayList<>(); + while (iterator.hasNext()) { + consumedElements.add(iterator.next()); + } + + assertThat(consumedElements.size(), equalTo(providedElements)); + // Elements are provided in order + for (int i = 0; i < providedElements; i++) { + assertThat(consumedElements.get(i), equalTo(i)); + } + assertThat(cleanedElements, is(empty())); + assertThat(iterator.getQueue(), is(empty())); + } + + public void testErrorPath() throws Exception { + CountDownLatch latch = new CountDownLatch(1); + Publisher publisher = s -> runOnNewThread(() -> { + s.onSubscribe(new Subscription() { + @Override + public void request(long n) { + assertThat(n, equalTo(2L)); + s.onNext(1); + s.onNext(2); + + try { + latch.await(); + } catch (InterruptedException e) { + assert false; + } + + 
runOnNewThread(() -> s.onError(new IOException("FAILED"))); + } + + @Override + public void cancel() { + + } + }); + }); + + Set cleaning = new HashSet<>(); + CancellableRateLimitedFluxIterator iterator = + new CancellableRateLimitedFluxIterator<>(2, cleaning::add); + publisher.subscribe(iterator); + + assertThat(iterator.hasNext(), equalTo(true)); + assertThat(iterator.next(), equalTo(1)); + + latch.countDown(); + //noinspection ResultOfMethodCallIgnored + assertBusy(() -> expectThrows(RuntimeException.class, iterator::hasNext)); + assertThat(cleaning, equalTo(Set.of(2))); + assertThat(iterator.getQueue(), is(empty())); + iterator.cancel(); + } + + public void testCancellation() throws Exception { + int requestedElements = 4; + final AtomicBoolean cancelled = new AtomicBoolean(); + Publisher publisher = s -> runOnNewThread(() -> { + s.onSubscribe(new Subscription() { + final CountDownLatch cancellationLatch = new CountDownLatch(1); + @Override + public void request(long n) { + assertThat(n, equalTo((long) requestedElements)); + s.onNext(1); + s.onNext(2); + try { + cancellationLatch.await(); + } catch (InterruptedException e) { + assert false; + } + + + runOnNewThread(() -> { + // It's possible that extra elements are emitted after cancellation + s.onNext(3); + s.onNext(4); + s.onComplete(); + }); + } + + @Override + public void cancel() { + cancelled.set(true); + cancellationLatch.countDown(); + } + }); + }); + + Set cleanedElements = new HashSet<>(); + CancellableRateLimitedFluxIterator iterator = + new CancellableRateLimitedFluxIterator<>(requestedElements, cleanedElements::add); + publisher.subscribe(iterator); + + assertThat(iterator.hasNext(), equalTo(true)); + assertThat(iterator.next(), equalTo(1)); + assertThat(iterator.next(), equalTo(2)); + iterator.cancel(); + assertThat(iterator.hasNext(), equalTo(false)); + + assertBusy(() -> assertThat(cleanedElements, equalTo(Set.of(3, 4)))); + assertThat(iterator.getQueue(), is(empty())); + } + + public void 
testErrorAfterCancellation() throws Exception { + int requestedElements = 4; + final AtomicBoolean cancelled = new AtomicBoolean(); + Publisher publisher = s -> runOnNewThread(() -> { + s.onSubscribe(new Subscription() { + final CountDownLatch cancellationLatch = new CountDownLatch(1); + + @Override + public void request(long n) { + assertThat(n, equalTo((long) requestedElements)); + s.onNext(1); + s.onNext(2); + try { + cancellationLatch.await(); + } catch (InterruptedException e) { + assert false; + } + + + runOnNewThread(() -> { + // It's still possible that an error is emitted after cancelling the subscription + s.onNext(3); + s.onError(new RuntimeException("Error!")); + }); + } + + @Override + public void cancel() { + cancelled.set(true); + cancellationLatch.countDown(); + } + }); + }); + + Set cleanedElements = new HashSet<>(); + CancellableRateLimitedFluxIterator iterator = + new CancellableRateLimitedFluxIterator<>(requestedElements, cleanedElements::add); + publisher.subscribe(iterator); + + assertThat(iterator.hasNext(), equalTo(true)); + assertThat(iterator.next(), equalTo(1)); + assertThat(iterator.next(), equalTo(2)); + iterator.cancel(); + //noinspection ResultOfMethodCallIgnored + assertBusy(() -> expectThrows(RuntimeException.class, iterator::hasNext)); + assertBusy(() -> assertThat(cleanedElements, equalTo(Set.of(3)))); + assertThat(iterator.getQueue(), is(empty())); + } + + public void runOnNewThread(Runnable runnable) { + threadPool.executor(ThreadPool.Names.GENERIC).submit(runnable); + } +} diff --git a/test/fixtures/azure-fixture/Dockerfile b/test/fixtures/azure-fixture/Dockerfile index 074e1caded134..a8577a2d6c791 100644 --- a/test/fixtures/azure-fixture/Dockerfile +++ b/test/fixtures/azure-fixture/Dockerfile @@ -1,5 +1,5 @@ FROM ubuntu:18.04 RUN apt-get update -qqy RUN apt-get install -qqy openjdk-11-jre-headless -ENTRYPOINT exec java -classpath "/fixture/shared/*" fixture.azure.AzureHttpFixture 0.0.0.0 8091 container +ENTRYPOINT exec java 
-classpath "/fixture/shared/*" fixture.azure.AzureHttpFixture 0.0.0.0 8091 azure_integration_test_account container EXPOSE 8091 diff --git a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpFixture.java b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpFixture.java index 1def1439429e6..6d725403e9b73 100644 --- a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpFixture.java +++ b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpFixture.java @@ -28,9 +28,9 @@ public class AzureHttpFixture { private final HttpServer server; - private AzureHttpFixture(final String address, final int port, final String container) throws IOException { + private AzureHttpFixture(final String address, final int port, final String account, final String container) throws IOException { this.server = HttpServer.create(new InetSocketAddress(InetAddress.getByName(address), port), 0); - server.createContext("/" + container, new AzureHttpHandler(container)); + server.createContext("/" + account, new AzureHttpHandler(account, container)); } private void start() throws Exception { @@ -44,10 +44,10 @@ private void start() throws Exception { } public static void main(final String[] args) throws Exception { - if (args == null || args.length != 3) { - throw new IllegalArgumentException("AzureHttpFixture expects 3 arguments [address, port, container]"); + if (args == null || args.length != 4) { + throw new IllegalArgumentException("AzureHttpFixture expects 4 arguments [address, port, account, container]"); } - final AzureHttpFixture fixture = new AzureHttpFixture(args[0], Integer.parseInt(args[1]), args[2]); + final AzureHttpFixture fixture = new AzureHttpFixture(args[0], Integer.parseInt(args[1]), args[2], args[3]); fixture.start(); } } diff --git a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java index 7a94a8c9f2e57..9e53a9bc96e9c 
100644 --- a/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java +++ b/test/fixtures/azure-fixture/src/main/java/fixture/azure/AzureHttpHandler.java @@ -33,6 +33,9 @@ import java.io.IOException; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; @@ -40,6 +43,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -50,11 +54,12 @@ */ @SuppressForbidden(reason = "Uses a HttpServer to emulate an Azure endpoint") public class AzureHttpHandler implements HttpHandler { - private final Map blobs; + private final String account; private final String container; - public AzureHttpHandler(final String container) { + public AzureHttpHandler(final String account, final String container) { + this.account = Objects.requireNonNull(account); this.container = Objects.requireNonNull(container); this.blobs = new ConcurrentHashMap<>(); } @@ -67,7 +72,7 @@ public void handle(final HttpExchange exchange) throws IOException { assert read == -1 : "Request body should have been empty but saw [" + read + "]"; } try { - if (Regex.simpleMatch("PUT /" + container + "/*blockid=*", request)) { + if (Regex.simpleMatch("PUT /" + account + "/" + container + "/*blockid=*", request)) { // Put Block (https://docs.microsoft.com/en-us/rest/api/storageservices/put-block) final Map params = new HashMap<>(); RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params); @@ -76,7 +81,7 @@ public void handle(final HttpExchange exchange) throws IOException { blobs.put(blockId, Streams.readFully(exchange.getRequestBody())); exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1); - } else if (Regex.simpleMatch("PUT /" 
+ container + "/*comp=blocklist*", request)) { + } else if (Regex.simpleMatch("PUT /" + account + "/" + container + "/*comp=blocklist*", request)) { // Put Block List (https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-list) final String blockList = Streams.copyToString(new InputStreamReader(exchange.getRequestBody(), StandardCharsets.UTF_8)); final List blockIds = Arrays.stream(blockList.split("")) @@ -91,9 +96,10 @@ public void handle(final HttpExchange exchange) throws IOException { block.writeTo(blob); } blobs.put(exchange.getRequestURI().getPath(), new BytesArray(blob.toByteArray())); + exchange.getResponseHeaders().add("x-ms-request-server-encrypted", "false"); exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1); - } else if (Regex.simpleMatch("PUT /" + container + "/*", request)) { + } else if (Regex.simpleMatch("PUT /" + account + "/" + container + "/*", request)) { // PUT Blob (see https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob) final String ifNoneMatch = exchange.getRequestHeaders().getFirst("If-None-Match"); if ("*".equals(ifNoneMatch)) { @@ -104,9 +110,10 @@ public void handle(final HttpExchange exchange) throws IOException { } else { blobs.put(exchange.getRequestURI().getPath(), Streams.readFully(exchange.getRequestBody())); } + exchange.getResponseHeaders().add("x-ms-request-server-encrypted", "false"); exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1); - } else if (Regex.simpleMatch("HEAD /" + container + "/*", request)) { + } else if (Regex.simpleMatch("HEAD /" + account + "/" + container + "/*", request)) { // Get Blob Properties (see https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-properties) final BytesReference blob = blobs.get(exchange.getRequestURI().getPath()); if (blob == null) { @@ -114,10 +121,11 @@ public void handle(final HttpExchange exchange) throws IOException { return; } exchange.getResponseHeaders().add("x-ms-blob-content-length", 
String.valueOf(blob.length())); - exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); + exchange.getResponseHeaders().add("Content-Length", String.valueOf(blob.length())); + exchange.getResponseHeaders().add("x-ms-blob-type", "BlockBlob"); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1); - } else if (Regex.simpleMatch("GET /" + container + "/*", request)) { + } else if (Regex.simpleMatch("GET /" + account + "/" + container + "/*", request)) { // GET Object (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html) final BytesReference blob = blobs.get(exchange.getRequestURI().getPath()); if (blob == null) { @@ -138,15 +146,16 @@ public void handle(final HttpExchange exchange) throws IOException { exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length)); exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); + exchange.getResponseHeaders().add("ETag", "\"blockblob\""); exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length); exchange.getResponseBody().write(blob.toBytesRef().bytes, start, length); - } else if (Regex.simpleMatch("DELETE /" + container + "/*", request)) { + } else if (Regex.simpleMatch("DELETE /" + account + "/" + container + "/*", request)) { // Delete Blob (https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob) blobs.entrySet().removeIf(blob -> blob.getKey().startsWith(exchange.getRequestURI().getPath())); exchange.sendResponseHeaders(RestStatus.ACCEPTED.getStatus(), -1); - } else if (Regex.simpleMatch("GET /container?restype=container&comp=list*", request)) { + } else if (Regex.simpleMatch("GET /" + account + "/" + container + "?*restype=container*comp=list*", request)) { // List Blobs (https://docs.microsoft.com/en-us/rest/api/storageservices/list-blobs) final Map params = new HashMap<>(); RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params); @@ 
-162,10 +171,10 @@ public void handle(final HttpExchange exchange) throws IOException { } list.append(""); for (Map.Entry blob : blobs.entrySet()) { - if (prefix != null && blob.getKey().startsWith("/" + container + "/" + prefix) == false) { + if (prefix != null && blob.getKey().startsWith("/" + account + "/" + container + "/" + prefix) == false) { continue; } - String blobPath = blob.getKey().replace("/" + container + "/", ""); + String blobPath = blob.getKey().replace("/" + account + "/" + container + "/", ""); if (delimiter != null) { int fromIndex = (prefix != null ? prefix.length() : 0); int delimiterPosition = blobPath.indexOf(delimiter, fromIndex); @@ -183,6 +192,7 @@ public void handle(final HttpExchange exchange) throws IOException { } list.append(""); + list.append(""); list.append(""); byte[] response = list.toString().getBytes(StandardCharsets.UTF_8); diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index 057a1c49010a6..79684646bbd2f 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -89,7 +89,7 @@ protected final String createRepository(final String name) { protected final String createRepository(final String name, final Settings settings) { final boolean verify = randomBoolean(); - logger.debug("--> creating repository [name: {}, verify: {}, settings: {}]", name, verify, settings); + logger.info("--> creating repository [name: {}, verify: {}, settings: {}]", name, verify, settings); assertAcked(client().admin().cluster().preparePutRepository(name) .setType(repositoryType()) .setVerify(verify) diff --git a/x-pack/plugin/repositories-metering-api/qa/azure/build.gradle 
b/x-pack/plugin/repositories-metering-api/qa/azure/build.gradle index 6bdd2ca6b8326..1465d49039ba9 100644 --- a/x-pack/plugin/repositories-metering-api/qa/azure/build.gradle +++ b/x-pack/plugin/repositories-metering-api/qa/azure/build.gradle @@ -86,7 +86,8 @@ testClusters.matching { it.name == "integTest" }.configureEach { '127.0.0.1:' + ephemeralPort } setting 'azure.client.repositories_metering.endpoint_suffix', - { "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${-> fixtureAddress('azure-fixture-repositories-metering')}" }, IGNORE_VALUE + { "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${-> fixtureAddress('azure-fixture-repositories-metering')}/azure_integration_test_account" }, + IGNORE_VALUE } else { println "Using an external service to test " + project.name diff --git a/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle b/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle index 3835a8ffc1ce9..d9d3c0f36e8ae 100644 --- a/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle +++ b/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle @@ -69,7 +69,7 @@ testClusters.matching { it.name == "integTest" }.configureEach { '127.0.0.1:' + ephemeralPort } setting 'azure.client.searchable_snapshots.endpoint_suffix', - { "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${-> fixtureAddress('azure-fixture-other')}" }, IGNORE_VALUE + { "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${-> fixtureAddress('azure-fixture-other')}/azure_integration_test_account" }, IGNORE_VALUE } else { println "Using an external service to test " + project.name