From 06eac15e76284ba11b9d2ac5fbe9a9e5b056eb62 Mon Sep 17 00:00:00 2001 From: sfali Date: Fri, 23 Aug 2024 16:55:47 -0400 Subject: [PATCH 01/78] Setting up module for Azure storage #3253 --- build.sbt | 2 ++ project/Dependencies.scala | 10 ++++++++++ 2 files changed, 12 insertions(+) diff --git a/build.sbt b/build.sbt index 1f85a5a850..7a2a148491 100644 --- a/build.sbt +++ b/build.sbt @@ -138,6 +138,8 @@ lazy val azureStorageQueue = alpakkaProject( Scala3.settings ) +lazy val azureStorage = alpakkaProject("azure-storage", "azure.storage", Dependencies.AzureStorage, Scala3.settings) + lazy val cassandra = alpakkaProject("cassandra", "cassandra", Dependencies.Cassandra) .settings(Scala3.settings) diff --git a/project/Dependencies.scala b/project/Dependencies.scala index c0eda41cbb..02697b9097 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -109,6 +109,16 @@ object Dependencies { ) ) + val AzureStorage = Seq( + libraryDependencies ++= Seq( + "com.typesafe.akka" %% "akka-http" % AkkaHttpVersion, + "com.typesafe.akka" %% "akka-http-xml" % AkkaHttpVersion, + // in-memory filesystem for file related tests + "com.google.jimfs" % "jimfs" % "1.3.0" % Test, // ApacheV2 + wiremock + ) + ) + val CassandraVersionInDocs = "4.0" val CassandraDriverVersion = "4.17.0" val CassandraDriverVersionInDocs = "4.17" From 42b85059db08edadbb7a32f63e1ce9a849faa27d Mon Sep 17 00:00:00 2001 From: sfali Date: Fri, 23 Aug 2024 16:58:58 -0400 Subject: [PATCH 02/78] Azure storage common settings #3253 --- .../src/main/resources/reference.conf | 35 +++ .../alpakka/azure/storage/settings.scala | 217 ++++++++++++++++++ 2 files changed, 252 insertions(+) create mode 100644 azure-storage/src/main/resources/reference.conf create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala diff --git a/azure-storage/src/main/resources/reference.conf b/azure-storage/src/main/resources/reference.conf new file mode 100644 index 0000000000..3316383349 --- /dev/null +++ b/azure-storage/src/main/resources/reference.conf @@ -0,0 +1,35 @@ +alpakka { + azure-storage { + api-version = "2024-11-04" + api-version = ${?AZURE_STORAGE_API_VERSION} + signing-algorithm = "HmacSHA256" + + credentials { + # valid values are anon (annonymous), SharedKey, SharedKeyLite, SAS + authorization-type = anon + authorization-type = ${?AZURE_STORAGE_AUTHORIZATION_TYPE} + + # following + account-name = none + account-name = ${?AZURE_STORAGE_ACCOUNT_NAME} + account-key = none + account-key = ${?AZURE_STORAGE_ACCOUNT_KEY} + } + + # Default settings corresponding to automatic retry of requests in an S3 stream. + retry-settings { + # The maximum number of additional attempts (following transient errors) that will be made to process a given + # request before giving up. + max-retries = 3 + + # The minimum delay between request retries. + min-backoff = 200ms + + # The maximum delay between request retries. + max-backoff = 10s + + # Random jitter factor applied to retry delay calculation. + random-factor = 0.0 + } + } +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala new file mode 100644 index 0000000000..2960652ee8 --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala @@ -0,0 +1,217 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage + +import akka.actor.ClassicActorSystemProvider +import com.typesafe.config.Config + +import java.time.{Duration => JavaDuration} +import java.util.Objects +import java.util.concurrent.TimeUnit +import scala.concurrent.duration._ + +final class StorageSettings(val apiVersion: String, + val authorizationType: String, + val azureNameKeyCredential: AzureNameKeyCredential, + val retrySettings: RetrySettings, + val algorithm: String) { + + /** Java API */ + def getApiVersion: String = apiVersion + + /** Java API */ + def getAuthorizationType: String = authorizationType + + /** Java API */ + def getAzureNameKeyCredential: AzureNameKeyCredential = azureNameKeyCredential + + /** Java API */ + def getRetrySettings: RetrySettings = retrySettings + + /** Java API */ + def getAlgorithm: String = algorithm + + /** Java API */ + def witApiVersion(apiVersion: String): StorageSettings = copy(apiVersion = apiVersion) + + /** Java API */ + def withAuthorizationType(authorizationType: String): StorageSettings = copy(authorizationType = authorizationType) + + /** Java API */ + def withAzureNameKeyCredential(azureNameKeyCredential: AzureNameKeyCredential): StorageSettings = + copy(azureNameKeyCredential = azureNameKeyCredential) + + /** Java API */ + def withRetrySettings(retrySettings: RetrySettings): StorageSettings = copy(retrySettings = retrySettings) + + /** Java API */ + def withAlgorithm(algorithm: String): StorageSettings = copy(algorithm = algorithm) + + override def toString: String = + s"""StorageSettings( + | apiVersion=$apiVersion, + | authorizationType=$authorizationType, + | azureNameKeyCredential=$azureNameKeyCredential, + | retrySettings=$retrySettings, + | algorithm=$algorithm + |)""".stripMargin.replaceAll(System.lineSeparator(), "") + + override def equals(other: Any): Boolean = other match { + case that: StorageSettings => + apiVersion == that.apiVersion && + authorizationType == that.authorizationType && + Objects.equals(azureNameKeyCredential, that.azureNameKeyCredential) && + Objects.equals(retrySettings, that.retrySettings) && + algorithm == that.algorithm + + case _ => false + } + + override def hashCode(): Int = + Objects.hash(apiVersion, authorizationType, azureNameKeyCredential, retrySettings, algorithm) + + private def copy( + apiVersion: String = apiVersion, + authorizationType: String = authorizationType, + azureNameKeyCredential: AzureNameKeyCredential = azureNameKeyCredential, + retrySettings: RetrySettings = retrySettings, + algorithm: String = algorithm + ) = + StorageSettings(apiVersion, authorizationType, azureNameKeyCredential, retrySettings, algorithm) +} + +object StorageSettings { + private[storage] val ConfigPath = "alpakka.azure-storage" + + def apply( + apiVersion: String, + authorizationType: String, + azureNameKeyCredential: AzureNameKeyCredential, + retrySettings: RetrySettings, + algorithm: String + ): StorageSettings = + new StorageSettings(apiVersion, authorizationType, azureNameKeyCredential, retrySettings, algorithm) + + /** Java API */ + def create( + apiVersion: String, + authorizationType: String, + azureNameKeyCredential: AzureNameKeyCredential, + retrySettings: RetrySettings, + algorithm: String + ): StorageSettings = + StorageSettings(apiVersion, authorizationType, azureNameKeyCredential, retrySettings, algorithm) + + def apply(config: Config): StorageSettings = { + val apiVersion = config.getString("api-version") + val credentials = config.getConfig("credentials") + val authorizationType = credentials.getString("authorization-type") + val algorithm = config.getString("signing-algorithm") + + StorageSettings( + apiVersion = apiVersion, + authorizationType = authorizationType, + azureNameKeyCredential = AzureNameKeyCredential(credentials), + retrySettings = RetrySettings(config.getConfig("retry-settings")), + algorithm = algorithm + ) + } + + def apply(system: ClassicActorSystemProvider): StorageSettings = + StorageSettings(system.classicSystem.settings.config.getConfig(ConfigPath)) +} + +final class RetrySettings private (val maxRetries: Int, + val minBackoff: FiniteDuration, + val maxBackoff: FiniteDuration, + val randomFactor: Double) { + + /** Java API */ + def getMaxRetries: Int = maxRetries + + /** Java API */ + def getMinBackoff: JavaDuration = JavaDuration.ofNanos(minBackoff.toNanos) + + /** Java API */ + def getMaxBackoff: JavaDuration = JavaDuration.ofNanos(maxBackoff.toNanos) + + /** Java API */ + def getRandomFactor: Double = randomFactor + + def withMaxRetries(value: Int): RetrySettings = copy(maxRetries = value) + + def withMinBackoff(value: FiniteDuration): RetrySettings = copy(minBackoff = value) + + /** Java API */ + def withMinBackoff(value: JavaDuration): RetrySettings = + copy(minBackoff = FiniteDuration(value.toNanos, TimeUnit.NANOSECONDS)) + + def withMaxBackoff(value: FiniteDuration): RetrySettings = copy(maxBackoff = value) + + /** Java API */ + def withMaxBackoff(value: JavaDuration): RetrySettings = + copy(maxBackoff = FiniteDuration(value.toNanos, TimeUnit.NANOSECONDS)) + + def withRandomFactor(value: Double): RetrySettings = copy(randomFactor = value) + + private def copy( + maxRetries: Int = maxRetries, + minBackoff: FiniteDuration = minBackoff, + maxBackoff: FiniteDuration = maxBackoff, + randomFactor: Double = randomFactor + ) = + new RetrySettings(maxRetries, minBackoff, maxBackoff, randomFactor) + + override def toString: String = + "RetrySettings(" + + s"maxRetries=$maxRetries," + + s"minBackoff=$minBackoff," + + s"maxBackoff=$maxBackoff," + + s"randomFactor=$randomFactor)" + + override def equals(other: Any): Boolean = other match { + case that: RetrySettings => + Objects.equals(this.maxRetries, that.maxRetries) && + Objects.equals(this.minBackoff, that.minBackoff) && + Objects.equals(this.maxBackoff, that.maxBackoff) && + Objects.equals(this.randomFactor, that.randomFactor) + case _ => false + } + + override def hashCode(): Int = + Objects.hash(Int.box(maxRetries), minBackoff, maxBackoff, Double.box(randomFactor)) +} + +object RetrySettings { + val default: RetrySettings = RetrySettings(3, 200.milliseconds, 10.seconds, 0.0) + + /** Scala API */ + def apply( + maxRetries: Int, + minBackoff: FiniteDuration, + maxBackoff: FiniteDuration, + randomFactor: Double + ): RetrySettings = + new RetrySettings(maxRetries, minBackoff, maxBackoff, randomFactor) + + /** Java API */ + def create(maxRetries: Int, minBackoff: JavaDuration, maxBackoff: JavaDuration, randomFactor: Double): RetrySettings = + apply( + maxRetries, + FiniteDuration(minBackoff.toNanos, TimeUnit.NANOSECONDS), + FiniteDuration(maxBackoff.toNanos, TimeUnit.NANOSECONDS), + randomFactor + ) + + def apply(config: Config): RetrySettings = + RetrySettings( + config.getInt("max-retries"), + FiniteDuration(config.getDuration("min-backoff").toNanos, TimeUnit.NANOSECONDS), + FiniteDuration(config.getDuration("max-backoff").toNanos, TimeUnit.NANOSECONDS), + config.getDouble("random-factor") + ) +} From 4412dec735886fb597b79a2a4d2a678401077762 Mon Sep 17 00:00:00 2001 From: sfali Date: Fri, 23 Aug 2024 17:13:07 -0400 Subject: [PATCH 03/78] Some common functions #3253 --- .../alpakka/azure/storage/package.scala | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala new file mode 100644 index 0000000000..e1d049b843 --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala @@ -0,0 +1,40 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure + +import java.time.{ZoneOffset, ZonedDateTime} +import java.time.format.DateTimeFormatter + +package object storage { + + private[storage] val NewLine: String = "\n" + private[storage] val AuthorizationHeaderKey = "Authorization" + private[storage] val XmsDateHeaderKey = "x-ms-date" + private[storage] val XmsVersionHeaderKey = "x-ms-version" + private[storage] val BlobTypeHeaderKey = "x-ms-blob-type" + private[storage] val LeaseIdHeaderKey = "x-ms-lease-id" + private[storage] val FileWriteTypeHeaderKey = "x-ms-write" + private[storage] val XMsContentLengthHeaderKey = "x-ms-content-length" + private[storage] val FileTypeHeaderKey = "x-ms-type" + private[storage] val BlobType = "blob" + private[storage] val FileType = "file" + + private[storage] def getFormattedDate: String = + DateTimeFormatter.RFC_1123_DATE_TIME.format(ZonedDateTime.now(ZoneOffset.UTC)) + + /** Removes ETag quotes in the same way the official AWS tooling does. See + */ + private[storage] def removeQuotes(string: String): String = { + val trimmed = string.trim() + val tail = if (trimmed.startsWith("\"")) trimmed.drop(1) else trimmed + if (tail.endsWith("\"")) tail.dropRight(1) else tail + } + + /** This method returns `None` if given an empty `String`. This is typically used when parsing XML since its common to + * have XML elements with an empty text value inside. + */ + private[storage] def emptyStringToOption(value: String): Option[String] = if (value == "") None else Some(value) +} From dc26cea5a3fa650b4c66218b2485c6fa10794cb9 Mon Sep 17 00:00:00 2001 From: sfali Date: Fri, 23 Aug 2024 17:15:59 -0400 Subject: [PATCH 04/78] Supporting model classes #3253 --- .../stream/alpakka/azure/storage/models.scala | 173 ++++++++++++++++++ 1 file changed, 173 insertions(+) create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala new file mode 100644 index 0000000000..bf8e72aacd --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala @@ -0,0 +1,173 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage + +import akka.http.scaladsl.model.{DateTime, HttpHeader} +import akka.http.scaladsl.model.headers._ +import com.typesafe.config.Config + +import java.util.{Base64, Optional} +import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters._ + +final case class AzureNameKeyCredential(accountName: String, accountKey: Array[Byte]) + +object AzureNameKeyCredential { + def apply(accountName: String, accountKey: String): AzureNameKeyCredential = + AzureNameKeyCredential(accountName, Base64.getDecoder.decode(accountKey)) + + /** Java API */ + def create(accountName: String, accountKey: String): AzureNameKeyCredential = + AzureNameKeyCredential(accountName, accountKey) + + def apply(config: Config): AzureNameKeyCredential = { + val accountName = config.getString("account-name") + val accountKey = config.getString("account-key") + AzureNameKeyCredential(accountName, accountKey) + } +} + +/** Modelled after ObjectMetadata in S3. + * + * @param metadata + * Raw Http headers + */ +final class ObjectMetadata private (val metadata: Seq[HttpHeader]) { + + /** Java Api + */ + lazy val headers: java.util.List[akka.http.javadsl.model.HttpHeader] = + (metadata: Seq[akka.http.javadsl.model.HttpHeader]).asJava + + /** Gets the hex encoded 128-bit MD5 digest of the associated object according to RFC 1864. This data is used as an + * integrity check to verify that the data received by the caller is the same data that was sent by Azure Storage. + *

This field represents the hex encoded 128-bit MD5 digest of an object's content as calculated by Azure + * Storage. The ContentMD5 field represents the base64 encoded 128-bit MD5 digest as calculated on the caller's side. + *

+ * + * @return + * The hex encoded MD5 hash of the content for the associated object as calculated by Azure Storage. + */ + lazy val eTag: Option[String] = metadata.collectFirst { + case e: ETag => removeQuotes(e.etag.value) + } + + /** Java Api + * + * Gets the hex encoded 128-bit MD5 digest of the associated object according to RFC 1864. This data is used as an + * integrity check to verify that the data received by the caller is the same data that was sent by Azure Storage. + *

This field represents the hex encoded 128-bit MD5 digest of an object's content as calculated by Azure + * Storage. The ContentMD5 field represents the base64 encoded 128-bit MD5 digest as calculated on the caller's side. + *

+ * + * @return + * The hex encoded MD5 hash of the content for the associated object as calculated by Azure Storage. + */ + lazy val getETag: Optional[String] = eTag.toJava + + /**

Gets the Content-Length HTTP header indicating the size of the associated object in bytes.

This field + * is required when uploading objects to Storage, but the Azure Storage Java client will automatically set it when + * working directly with files. When uploading directly from a stream, set this field if possible. Otherwise the + * client must buffer the entire stream in order to calculate the content length before sending the data to Azure + * Storage.

For more information on the Content-Length HTTP header, see + * [[https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13]]

+ * + * @return + * The Content-Length HTTP header indicating the size of the associated object in bytes. + * @see + * ObjectMetadata#setContentLength(long) + */ + lazy val contentLength: Long = + metadata + .collectFirst { + case cl: `Content-Length` => + cl.length + } + .getOrElse(0) + + /** Java Api + * + *

Gets the Content-Length HTTP header indicating the size of the associated object in bytes.

This field + * is required when uploading objects to Storage, but the Azure Storage Java client will automatically set it when + * working directly with files. When uploading directly from a stream, set this field if possible. Otherwise the + * client must buffer the entire stream in order to calculate the content length before sending the data to Azure + * Storage.

For more information on the Content-Length HTTP header, see + * [[https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.13]]

+ * + * @return + * The Content-Length HTTP header indicating the size of the associated object in bytes. + * @see + * ObjectMetadata#setContentLength(long) + */ + lazy val getContentLength: Long = contentLength + + /**

Gets the Content-Type HTTP header, which indicates the type of content stored in the associated object. The + * value of this header is a standard MIME type.

When uploading files, the Azure Storage Java client will + * attempt to determine the correct content type if one hasn't been set yet. Users are responsible for ensuring a + * suitable content type is set when uploading streams. If no content type is provided and cannot be determined by + * the filename, the default content type, "application/octet-stream", will be used.

For more information on + * the Content-Type header, see [[https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17]]

+ * + * @return + * The HTTP Content-Type header, indicating the type of content stored in the associated Storage object. + * @see + * ObjectMetadata#setContentType(String) + */ + lazy val contentType: Option[String] = metadata.collectFirst { + case ct: `Content-Type` => + ct.value + } + + /** Java Api + * + *

Gets the Content-Type HTTP header, which indicates the type of content stored in the associated object. The + * value of this header is a standard MIME type.

When uploading files, the Azure Storage Java client will + * attempt to determine the correct content type if one hasn't been set yet. Users are responsible for ensuring a + * suitable content type is set when uploading streams. If no content type is provided and cannot be determined by + * the filename, the default content type, "application/octet-stream", will be used.

For more information on + * the Content-Type header, see [[https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17]]

+ * + * @return + * The HTTP Content-Type header, indicating the type of content stored in the associated Storage object. + * @see + * ObjectMetadata#setContentType(String) + */ + lazy val getContentType: Optional[String] = contentType.toJava + + /** Gets the value of the Last-Modified header, indicating the date and time at which Azure Storage last recorded a + * modification to the associated object. + * + * @return + * The date and time at which Azure Storage last recorded a modification to the associated object. + */ + lazy val lastModified: Option[DateTime] = metadata.collectFirst { + case ct: `Last-Modified` => + ct.date + } + + /** Java Api + * + * Gets the value of the Last-Modified header, indicating the date and time at which Azure Storage last recorded a + * modification to the associated object. + * + * @return + * The date and time at which Azure Storage last recorded a modification to the associated object. + */ + lazy val getLastModified: Option[DateTime] = lastModified + + override def toString: String = + s"""ObjectMetadata( + |eTag=$eTag, + | contentLength=$contentLength, + | contentType=$contentType, + | lastModified=$lastModified + |)""".stripMargin.replaceAll(System.lineSeparator(), "") +} + +object ObjectMetadata { + def apply(metadata: Seq[HttpHeader]) = new ObjectMetadata(metadata) +} From 882afcdc78c3e6c9e850d04be1db4d93a3f0a6b4 Mon Sep 17 00:00:00 2001 From: sfali Date: Fri, 23 Aug 2024 17:17:46 -0400 Subject: [PATCH 05/78] Exception representing returned error #3253 It parses the returned XML and populate StorageException --- .../azure/storage/StorageException.scala | 77 +++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageException.scala diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageException.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageException.scala new file mode 100644 index 0000000000..37b203be23 --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageException.scala @@ -0,0 +1,77 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage + +import akka.http.scaladsl.model.StatusCode + +import scala.util.{Failure, Success, Try} +import scala.xml.{Elem, NodeSeq, XML} + +final case class StorageException(statusCode: StatusCode, + errorCode: String, + errorMessage: String, + resourceName: Option[String], + resourceValue: Option[String], + reason: Option[String]) + extends RuntimeException(errorMessage) { + + override def toString: String = + s"""StorageException( + |statusCode=$statusCode, + | errorCode=$errorCode, + | errorMessage=$errorMessage, + | resourceName=$resourceName, + | resourceValue=$resourceValue, + | reason=$reason + |)""".stripMargin.replaceAll(System.lineSeparator(), "") +} + +object StorageException { + def apply(statusCode: StatusCode, + errorCode: String, + errorMessage: String, + resourceName: Option[String], + resourceValue: Option[String], + reason: Option[String]): StorageException = + new StorageException(statusCode, errorCode, errorMessage, resourceName, resourceValue, reason) + + def apply(response: String, statusCode: StatusCode): StorageException = { + def getOptionalValue(xmlResponse: Elem, elementName: String, fallBackElementName: Option[String]) = { + val element = xmlResponse \ elementName + val node = + if (element.nonEmpty) element + else if (fallBackElementName.nonEmpty) xmlResponse \ fallBackElementName.get + else NodeSeq.Empty + + emptyStringToOption(node.text) + } + + Try { + val utf8_bom = "\uFEFF" + val sanitizedResponse = if (response.startsWith(utf8_bom)) response.substring(1) else response + val xmlResponse = XML.loadString(sanitizedResponse) + StorageException( + statusCode = statusCode, + errorCode = (xmlResponse \ "Code").text, + errorMessage = (xmlResponse \ "Message").text, + resourceName = getOptionalValue(xmlResponse, "QueryParameterName", Some("HeaderName")), + resourceValue = getOptionalValue(xmlResponse, "QueryParameterValue", Some("HeaderValue")), + reason = getOptionalValue(xmlResponse, "Reason", Some("AuthenticationErrorDetail")) + ) + } match { + case Failure(ex) => + StorageException(statusCode = statusCode, + errorCode = ex.getMessage, + errorMessage = response, + resourceName = None, + resourceValue = None, + reason = None) + case Success(value) => value + } + + } +} From 5a8728e449b53acca049ebc56c0571bae570ec6e Mon Sep 17 00:00:00 2001 From: sfali Date: Fri, 23 Aug 2024 17:21:06 -0400 Subject: [PATCH 06/78] Helper class to sign and populate Authorization header #3253 1. Populates required date and version header 2. Reads current request and create authorization token as per AWS requirements 3. Populates authorization header --- .../azure/storage/impl/auth/Signer.scala | 102 ++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala new file mode 100644 index 0000000000..c41c55dc5e --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala @@ -0,0 +1,102 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package impl.auth + +import akka.NotUsed +import akka.http.scaladsl.model.{HttpRequest, Uri} +import akka.http.scaladsl.model.headers._ +import akka.stream.scaladsl.Source + +import java.util.Base64 +import javax.crypto.Mac +import javax.crypto.spec.SecretKeySpec + +/** Takes initial request and add signed `Authorization` header and essential XMS headers. + * + * @param initialRequest + * - Initial HTTP request without authorization and XMS headers + * @param settings + * - Storage settings + */ +final case class Signer(initialRequest: HttpRequest, settings: StorageSettings) { + + private val credential = settings.azureNameKeyCredential + private val requestWithHeaders = + initialRequest + .addHeader(RawHeader(XmsDateHeaderKey, getFormattedDate)) + .addHeader(RawHeader(XmsVersionHeaderKey, settings.apiVersion)) + + private val mac = { + val mac = Mac.getInstance(settings.algorithm) + mac.init(new SecretKeySpec(credential.accountKey, settings.algorithm)) + mac + } + + def signedRequest: Source[HttpRequest, NotUsed] = + if (settings.authorizationType == "anon") Source.single(requestWithHeaders) + else { + val contentLengthValue = getHeaderValue(`Content-Length`.name, "0") + val contentLength = if (contentLengthValue == "0") "" else contentLengthValue + + val headersToSign = Seq( + requestWithHeaders.method.value.toUpperCase, + getHeaderValue(`Content-Encoding`.name, ""), + getHeaderValue("Content-Language", ""), + contentLength, + getHeaderValue("Content-MD5", ""), + getHeaderValue(`Content-Type`.name, ""), + "", // date, we are using `x-ms-date` instead + getHeaderValue(`If-Modified-Since`.name, ""), + getHeaderValue(`If-Match`.name, ""), + getHeaderValue(`If-None-Match`.name, ""), + getHeaderValue(`If-Unmodified-Since`.name, ""), + getHeaderValue(Range.name, "") + ) ++ getAdditionalXmsHeaders ++ getCanonicalizedResource + + val signature = sign(headersToSign) + + Source.single( + requestWithHeaders.addHeader( + RawHeader(AuthorizationHeaderKey, s"${settings.authorizationType} ${credential.accountName}:$signature") + ) + ) + } + + private def getHeaderOptionalValue(headerName: String) = + requestWithHeaders.headers.collectFirst { + case header if header.name() == headerName => header.value() + } + + private def getHeaderValue(headerName: String, defaultValue: String) = + getHeaderOptionalValue(headerName).getOrElse(defaultValue) + + private def getAdditionalXmsHeaders = + requestWithHeaders.headers.filter(header => header.name().startsWith("x-ms-")).sortBy(_.name()).map { header => + s"${header.name()}:${header.value()}" + } + + private def getCanonicalizedResource = { + val uri = requestWithHeaders.uri + val resourcePath = s"/${credential.accountName}${uri.path.toString()}" + + val queries = + uri.queryString() match { + case Some(queryString) => + Uri.Query(queryString).toMap.toSeq.sortBy(_._1).map { + case (key, value) => s"$key:$value" + } + + case None => Seq.empty[String] + } + + Seq(resourcePath) ++ queries + } + + private def sign(headersToSign: Seq[String]) = + Base64.getEncoder.encodeToString(mac.doFinal(headersToSign.mkString(NewLine).getBytes)) +} From 3616c5f0dda493f50580f1783eb13c801cbd2c49 Mon Sep 17 00:00:00 2001 From: sfali Date: Fri, 23 Aug 2024 17:24:24 -0400 Subject: [PATCH 07/78] Implementation of Azure storage functions #3253 Current implementation only supports Blob and File services. Currently supported functions: 1. Get blob and file 2. Put blob 3. Create file 3. Put range 4. Clear range 5. Delete blob and file 6. Get blob and file properties --- .../storage/impl/AzureStorageStream.scala | 389 ++++++++++++++++++ 1 file changed, 389 insertions(+) create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala new file mode 100644 index 0000000000..b0fa5fafca --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -0,0 +1,389 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package impl + +import akka.NotUsed +import akka.actor.ActorSystem +import akka.dispatch.ExecutionContexts +import akka.http.scaladsl.Http +import akka.http.scaladsl.model.StatusCodes.{Accepted, Created, NotFound, OK} +import akka.http.scaladsl.model.headers.{ + ByteRange, + CustomHeader, + RawHeader, + `Content-Length`, + `Content-Type`, + Range => RangeHeader +} +import akka.http.scaladsl.model.{ + ContentType, + HttpEntity, + HttpHeader, + HttpMethod, + HttpMethods, + HttpRequest, + HttpResponse, + ResponseEntity, + StatusCode, + Uri +} +import akka.http.scaladsl.unmarshalling.Unmarshal +import akka.stream.Materializer +import akka.stream.alpakka.azure.storage.impl.auth.Signer +import akka.stream.scaladsl.{Flow, RetryFlow, Source} +import akka.util.ByteString + +import scala.collection.immutable +import scala.concurrent.{Future, Promise} +import scala.util.{Failure, Success, Try} + +object AzureStorageStream { + + private[storage] def getObject(storageType: String, + objectPath: String, + range: Option[ByteRange], + versionId: Option[String], + leaseId: Option[String]): Source[ByteString, Future[ObjectMetadata]] = { + Source + .fromMaterializer { (mat, _) => + implicit val system: ActorSystem = mat.system + val settings = StorageSettings(system) + val request = + createRequest( + method = HttpMethods.GET, + accountName = settings.azureNameKeyCredential.accountName, + storageType = storageType, + objectPath = objectPath, + queryString = versionId.map(value => s"versionId=$value"), + headers = populateCommonHeaders(HttpEntity.Empty, range = range, leaseId = leaseId) + ) + val objectMetadataMat = Promise[ObjectMetadata]() + signAndRequest(request, settings)(mat.system) + .map(response => response.withEntity(response.entity.withoutSizeLimit)) + .mapAsync(parallelism = 1)(entityForSuccess) + .flatMapConcat { + case (entity, headers) => + objectMetadataMat.success(computeMetaData(headers, entity)) + entity.dataBytes + } + .mapError { + case e: Throwable => + objectMetadataMat.tryFailure(e) + e + } + .mapMaterializedValue(_ => objectMetadataMat.future) + } + .mapMaterializedValue(_.flatMap(identity)(ExecutionContexts.parasitic)) + } + + private[storage] def getObjectProperties(storageType: String, + objectPath: String, + versionId: Option[String], + leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed] = { + Source + .fromMaterializer { (mat, _) => + implicit val system: ActorSystem = mat.system + import mat.executionContext + val settings = StorageSettings(system) + val request = + createRequest( + method = HttpMethods.HEAD, + accountName = settings.azureNameKeyCredential.accountName, + storageType = storageType, + objectPath = objectPath, + queryString = versionId.map(value => s"versionId=$value"), + headers = populateCommonHeaders(HttpEntity.Empty, leaseId = leaseId) + ) + + signAndRequest(request, settings) + .flatMapConcat { + case HttpResponse(OK, headers, entity, _) => + Source.future( + entity.withoutSizeLimit().discardBytes().future().map(_ => Some(computeMetaData(headers, entity))) + ) + case HttpResponse(NotFound, _, entity, _) => + Source.future(entity.discardBytes().future().map(_ => None)(ExecutionContexts.parasitic)) + case response: HttpResponse => Source.future(unmarshalError(response.status, response.entity)) + } + } + .mapMaterializedValue(_ => NotUsed) + } + + private[storage] def deleteObject(storageType: String, + objectPath: String, + versionId: Option[String], + leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed] = { + Source + .fromMaterializer { (mat, _) => + implicit val system: ActorSystem = mat.system + import mat.executionContext + val settings = StorageSettings(system) + val request = + createRequest( + method = HttpMethods.DELETE, + accountName = settings.azureNameKeyCredential.accountName, + storageType = storageType, + objectPath = objectPath, + queryString = versionId.map(value => s"versionId=$value"), + headers = populateCommonHeaders(HttpEntity.Empty, leaseId = leaseId) + ) + + signAndRequest(request, settings) + .flatMapConcat { + case HttpResponse(Accepted, headers, entity, _) => + Source.future( + entity.withoutSizeLimit().discardBytes().future().map(_ => Some(computeMetaData(headers, entity))) + ) + case HttpResponse(NotFound, _, entity, _) => + Source.future(entity.discardBytes().future().map(_ => None)(ExecutionContexts.parasitic)) + case response: HttpResponse => Source.future(unmarshalError(response.status, response.entity)) + } + } + .mapMaterializedValue(_ => NotUsed) + } + + private[storage] def putBlob(blobType: String, + objectPath: String, + contentType: ContentType, + contentLength: Long, + payload: Source[ByteString, _], + leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed] = { + Source + .fromMaterializer { (mat, _) => + implicit val system: ActorSystem = mat.system + val settings = StorageSettings(system) + val httpEntity = HttpEntity(contentType, contentLength, payload) + val request = + createRequest( + HttpMethods.PUT, + settings.azureNameKeyCredential.accountName, + BlobType, + objectPath, + headers = populateCommonHeaders(httpEntity, blobType = Some(blobType), leaseId = leaseId) + ).withEntity(httpEntity) + handlePutRequest(request, settings) + } + .mapMaterializedValue(_ => NotUsed) + } + + private[storage] def createFile(objectPath: String, + contentType: ContentType, + maxSize: Long, + leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed] = { + Source + .fromMaterializer { (mat, _) => + implicit val system: ActorSystem = mat.system + val settings = StorageSettings(system) + val request = + createRequest( + HttpMethods.PUT, + settings.azureNameKeyCredential.accountName, + FileType, + objectPath, + headers = Seq( + CustomContentTypeHeader(contentType), + RawHeader(XMsContentLengthHeaderKey, maxSize.toString), + RawHeader(FileTypeHeaderKey, "file") + ) ++ leaseId.map(value => RawHeader(LeaseIdHeaderKey, value)) + ) + handlePutRequest(request, settings) + } + .mapMaterializedValue(_ => NotUsed) + } + + private[storage] def updateOrClearRange(objectPath: String, + contentType: ContentType, + range: ByteRange.Slice, + payload: Option[Source[ByteString, _]], + leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed.type] = { + Source + .fromMaterializer { (mat, _) => + implicit val system: ActorSystem = mat.system + val settings = StorageSettings(system) + val contentLength = range.last - range.first + 1 + val clearRange = payload.isEmpty + val writeType = if (clearRange) "clear" else "update" + val overrideContentLength = if (clearRange) Some(0L) else None + val httpEntity = + if (clearRange) HttpEntity.empty(contentType) else HttpEntity(contentType, contentLength, payload.get) + val request = + createRequest( + method = HttpMethods.PUT, + accountName = settings.azureNameKeyCredential.accountName, + storageType = FileType, + objectPath = objectPath, + queryString = Some("comp=range"), + headers = populateCommonHeaders(httpEntity, + overrideContentLength, + range = Some(range), + leaseId = leaseId, + writeType = Some(writeType)) + ).withEntity(httpEntity) + handlePutRequest(request, settings) + } + .mapMaterializedValue(_ => NotUsed) + } + + private def handlePutRequest(request: HttpRequest, settings: StorageSettings)(implicit system: ActorSystem) = { + import system.dispatcher + signAndRequest(request, settings).flatMapConcat { + case HttpResponse(Created, h, entity, _) => + Source.future(entity.discardBytes().future().map { _ => + val contentLengthHeader = `Content-Length` + .parseFromValueString(entity.contentLengthOption.getOrElse(0L).toString) + .map(Seq(_)) + .getOrElse(Nil) + Some(ObjectMetadata(h ++ contentLengthHeader)) + }) + case HttpResponse(NotFound, _, entity, _) => + Source.future(entity.discardBytes().future().map(_ => None)(ExecutionContexts.parasitic)) + case response: HttpResponse => Source.future(unmarshalError(response.status, response.entity)) + } + } + + private def signAndRequest( + request: HttpRequest, + settings: StorageSettings + )(implicit + system: ActorSystem): Source[HttpResponse, NotUsed] = { + import system.dispatcher + + val retriableFlow = Flow[HttpRequest] + .flatMapConcat(req => Signer(req, settings).signedRequest) + .mapAsync(parallelism = 1)( + req => + singleRequest(req) + .map(Success.apply) + .recover[Try[HttpResponse]] { + case t => + Failure(t) + } + ) + + import settings.retrySettings._ + Source + .single(request) + .via(RetryFlow.withBackoff(minBackoff, maxBackoff, randomFactor, maxRetries, retriableFlow) { + case (request, Success(response)) if isTransientError(response.status) => + response.entity.discardBytes() + Some(request) + case (request, Failure(_)) => + // Treat any exception as transient. + Some(request) + case _ => None + }) + .mapAsync(1)(Future.fromTry) + } + + private def singleRequest(request: HttpRequest)(implicit system: ActorSystem) = + Http().singleRequest(request) + + private def computeMetaData(headers: immutable.Seq[HttpHeader], entity: ResponseEntity): ObjectMetadata = { + val contentLengthHeader: Seq[HttpHeader] = `Content-Length` + .parseFromValueString(entity.contentLengthOption.getOrElse(0L).toString) + .map(Seq(_)) + .getOrElse(Nil) + val contentTypeHeader: Seq[HttpHeader] = `Content-Type` + .parseFromValueString(entity.contentType.value) + .map(Seq(_)) + .getOrElse(Nil) + ObjectMetadata( + headers ++ + contentLengthHeader ++ contentTypeHeader ++ + immutable.Seq( + CustomContentTypeHeader(entity.contentType) + ) + ) + } + + private def isTransientError(status: StatusCode): Boolean = { + // 5xx errors from S3 can be treated as transient. + status.intValue >= 500 + } + + private def unmarshalError(code: StatusCode, entity: ResponseEntity)(implicit mat: Materializer) = { + import mat.executionContext + Unmarshal(entity).to[String].map { err => + throw StorageException(err, code) + } + } + + private def entityForSuccess( + resp: HttpResponse + )(implicit mat: Materializer): Future[(ResponseEntity, Seq[HttpHeader])] = { + resp match { + case HttpResponse(status, headers, entity, _) if status.isSuccess() && !status.isRedirection() => + Future.successful((entity, headers)) + case response: HttpResponse => + unmarshalError(response.status, response.entity) + } + } + + private def populateCommonHeaders(entity: HttpEntity, + overrideContentLength: Option[Long] = None, + range: Option[ByteRange] = None, + blobType: Option[String] = None, + leaseId: Option[String], + writeType: Option[String] = None) = { + // Azure required to have these two headers (Content-Length & Content-Type) in the request + // in some cases Content-Length header must be set as 0 + val contentLength = overrideContentLength.orElse(entity.contentLengthOption).getOrElse(0L) + val maybeContentLengthHeader = + if (overrideContentLength.isEmpty && contentLength == 0L) None else Some(CustomContentLengthHeader(contentLength)) + + val maybeContentTypeHeader = + emptyStringToOption(entity.contentType.toString()) match { + case Some(value) if value != "none/none" => Some(CustomContentTypeHeader(entity.contentType)) + case _ => None + } + + val maybeRangeHeader = range.map(RangeHeader(_)) + val maybeBlobTypeHeader = blobType.map(value => RawHeader(BlobTypeHeaderKey, value)) + val maybeLeaseIdHeader = leaseId.map(value => RawHeader(LeaseIdHeaderKey, value)) + val maybeWriteTypeHeader = writeType.map(value => RawHeader(FileWriteTypeHeaderKey, value)) + + (maybeContentLengthHeader ++ maybeContentTypeHeader ++ maybeRangeHeader ++ maybeBlobTypeHeader ++ maybeLeaseIdHeader ++ maybeWriteTypeHeader).toSeq + } + + private def createRequest(method: HttpMethod, + accountName: String, + storageType: String, + objectPath: String, + queryString: Option[String] = None, + headers: Seq[HttpHeader]) = + HttpRequest(method = method, uri = createUri(accountName, storageType, objectPath, queryString), headers = headers) + + private def createUri(accountName: String, storageType: String, objectPath: String, queryString: Option[String]) = { + Uri.from(scheme = "https", + host = s"$accountName.$storageType.core.windows.net", + path = Uri.Path(objectPath).toString(), + queryString = queryString) + } + + // `Content-Type` header is by design not accessible as header. So need to have a custom + // header implementation to expose that + private case class CustomContentTypeHeader(contentType: ContentType) extends CustomHeader { + override def name(): String = "Content-Type" + + override def value(): String = contentType.value + + override def renderInRequests(): Boolean = true + + override def renderInResponses(): Boolean = true + } + + private case class CustomContentLengthHeader(contentLength: Long) extends CustomHeader { + override def name(): String = "Content-Length" + + override def value(): String = contentLength.toString + + override def renderInRequests(): Boolean = true + + override def renderInResponses(): Boolean = true + } +} From 76263904c7f24e9ecc2f893d90899515e19a44b7 Mon Sep 17 00:00:00 2001 From: sfali Date: Fri, 23 Aug 2024 17:25:12 -0400 Subject: [PATCH 08/78] Scala and Java facade to implementation #3253 --- .../azure/storage/javadsl/AzureStorage.scala | 268 ++++++++++++++++++ .../azure/storage/scaladsl/AzureStorage.scala | 177 ++++++++++++ 2 files changed, 445 insertions(+) create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/AzureStorage.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureStorage.scala diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/AzureStorage.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/AzureStorage.scala new file mode 100644 index 0000000000..eb714e91ed --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/AzureStorage.scala @@ -0,0 +1,268 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package javadsl + +import akka.NotUsed +import akka.http.javadsl.model._ +import akka.http.javadsl.model.headers.ByteRange +import akka.http.scaladsl.model.headers.ByteRange.Slice +import akka.http.scaladsl.model.headers.{ByteRange => ScalaByteRange} +import akka.http.scaladsl.model.{ContentType => ScalaContentType} +import akka.stream.alpakka.azure.storage.impl.AzureStorageStream +import akka.stream.javadsl.Source +import akka.stream.scaladsl.SourceToCompletionStage +import akka.util.ByteString + +import java.util.Optional +import java.util.concurrent.CompletionStage + +/** + * Java API + */ +object AzureStorage { + + /** + * Gets blob representing `objectPath` with specified range (if applicable). + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param range range to download + * @param versionId versionId of the blob (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a + * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] + */ + def getBlob(objectPath: String, + range: ByteRange, + versionId: Optional[String], + leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = { + val scalaRange = range.asInstanceOf[ScalaByteRange] + new Source( + AzureStorageStream + .getObject(BlobType, objectPath, Some(scalaRange), Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .toCompletionStage() + ) + } + + /** + * Gets blob representing `objectPath` with specified range (if applicable). + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param versionId versionId of the blob (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a + * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] + */ + def getBlob(objectPath: String, + versionId: Optional[String], + leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = + new Source( + AzureStorageStream + .getObject(BlobType, objectPath, None, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .toCompletionStage() + ) + + /** + * Gets file representing `objectPath` with specified range (if applicable). + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` + * @param range range to download + * @param versionId versionId of the file (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a + * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] + */ + def getFile(objectPath: String, + range: ByteRange, + versionId: Optional[String], + leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = { + val scalaRange = range.asInstanceOf[ScalaByteRange] + new Source( + AzureStorageStream + .getObject(FileType, objectPath, Some(scalaRange), Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .toCompletionStage() + ) + } + + /** + * Gets file representing `objectPath` with specified range (if applicable). + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` + * @param versionId versionId of the file (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a + * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] + */ + def getFile(objectPath: String, + versionId: Optional[String], + leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = { + new Source( + AzureStorageStream + .getObject(FileType, objectPath, None, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .toCompletionStage() + ) + } + + /** + * Gets blob properties. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param versionId versionId of the blob (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def getBlobProperties(objectPath: String, + versionId: Optional[String], + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + AzureStorageStream + .getObjectProperties(BlobType, objectPath, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .map(opt => Optional.ofNullable(opt.orNull)) + .asJava + + /** + * Gets file properties. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` + * @param versionId versionId of the file (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def getFileProperties(objectPath: String, + versionId: Optional[String], + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + AzureStorageStream + .getObjectProperties(FileType, objectPath, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .map(opt => Optional.ofNullable(opt.orNull)) + .asJava + + /** + * Deletes blob. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param versionId versionId of the blob (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def deleteBlob(objectPath: String, + versionId: Optional[String], + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + AzureStorageStream + .deleteObject(BlobType, objectPath, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .map(opt => Optional.ofNullable(opt.orNull)) + .asJava + + /** + * Deletes file. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` + * @param versionId versionId of the file (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def deleteFile(objectPath: String, + versionId: Optional[String], + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + AzureStorageStream + .deleteObject(FileType, objectPath, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .map(opt => Optional.ofNullable(opt.orNull)) + .asJava + + /** + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param contentType content type of the blob + * @param contentLength length of the blob + * @param payload actual payload, a [[akka.stream.javadsl.Source Source]] of [[akka.util.ByteString ByteString]] + * @param blobType type of the blob, ''Must be one of:'' __'''BlockBlob, PageBlob, or AppendBlob'''__ + * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def putBlob(objectPath: String, + contentType: ContentType, + contentLength: Long, + payload: Source[ByteString, _], + blobType: String = "BlockBlob", + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + AzureStorageStream + .putBlob(blobType, + objectPath, + contentType.asInstanceOf[ScalaContentType], + contentLength, + payload.asScala, + Option(leaseId.orElse(null))) + .map(opt => Optional.ofNullable(opt.orNull)) + .asJava + + /** + * Creates a file. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` + * @param contentType content type of the blob + * @param maxSize maximum size of the file + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def createFile(objectPath: String, + contentType: ContentType, + maxSize: Long, + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + AzureStorageStream + .createFile(objectPath, contentType.asInstanceOf[ScalaContentType], maxSize, Option(leaseId.orElse(null))) + .map(opt => Optional.ofNullable(opt.orNull)) + .asJava + + /** + * Updates file on the specified range. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` + * @param contentType content type of the blob + * @param range range of bytes to be written + * @param payload actual payload, a [[akka.stream.javadsl.Source Source]] of [[akka.util.ByteString ByteString]] + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def updateRange(objectPath: String, + contentType: ContentType, + range: Slice, + payload: Source[ByteString, _], + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed.type] = + AzureStorageStream + .updateOrClearRange(objectPath, + contentType.asInstanceOf[ScalaContentType], + range, + Some(payload.asScala), + Option(leaseId.orElse(null))) + .map(opt => Optional.ofNullable(opt.orNull)) + .asJava + + /** + * Clears specified range from the file. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` + * @param range range of bytes to be cleared + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def clearRange(objectPath: String, + range: Slice, + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed.type] = + AzureStorageStream + .updateOrClearRange(objectPath, + ContentTypes.NO_CONTENT_TYPE.asInstanceOf[ScalaContentType], + range, + None, + Option(leaseId.orElse(null))) + .map(opt => Optional.ofNullable(opt.orNull)) + .asJava +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureStorage.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureStorage.scala new file mode 100644 index 0000000000..a67f4aeed2 --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureStorage.scala @@ -0,0 +1,177 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package scaladsl + +import akka.NotUsed +import akka.http.scaladsl.model.{ContentType, ContentTypes} +import akka.http.scaladsl.model.headers.ByteRange +import akka.stream.alpakka.azure.storage.impl.AzureStorageStream +import akka.stream.scaladsl.Source +import akka.util.ByteString + +import scala.concurrent.Future + +/** + * Scala API + */ +object AzureStorage { + + /** + * Gets blob representing `objectPath` with specified range (if applicable). + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param range range to download + * @param versionId versionId of the blob (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.scaladsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a + * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] + */ + def getBlob(objectPath: String, + range: Option[ByteRange] = None, + versionId: Option[String] = None, + leaseId: Option[String] = None): Source[ByteString, Future[ObjectMetadata]] = + AzureStorageStream.getObject(BlobType, objectPath, range, versionId, leaseId) + + /** + * Gets file representing `objectPath` with specified range (if applicable). + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` + * @param range range to download + * @param versionId versionId of the file (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.scaladsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a + * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] + */ + def getFile(objectPath: String, + range: Option[ByteRange] = None, + versionId: Option[String] = None, + leaseId: Option[String] = None): Source[ByteString, Future[ObjectMetadata]] = + AzureStorageStream.getObject(FileType, objectPath, range, versionId, leaseId) + + /** + * Gets blob properties. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param versionId versionId of the blob (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def getBlobProperties(objectPath: String, + versionId: Option[String] = None, + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.getObjectProperties(BlobType, objectPath, versionId, leaseId) + + /** + * Gets file properties. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` + * @param versionId versionId of the file (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def getFileProperties(objectPath: String, + versionId: Option[String] = None, + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.getObjectProperties(FileType, objectPath, versionId, leaseId) + + /** + * Deletes blob. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param versionId versionId of the blob (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def deleteBlob(objectPath: String, + versionId: Option[String] = None, + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.deleteObject(BlobType, objectPath, versionId, leaseId) + + /** + * Deletes file. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` + * @param versionId versionId of the file (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def deleteFile(objectPath: String, + versionId: Option[String] = None, + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.deleteObject(FileType, objectPath, versionId, leaseId) + + /** + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param contentType content type of the blob + * @param contentLength length of the blob + * @param payload actual payload, a [[akka.stream.scaladsl.Source Source]] of [[akka.util.ByteString ByteString]] + * @param blobType type of the blob, ''Must be one of:'' __'''BlockBlob, PageBlob, or AppendBlob'''__ + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def putBlob(objectPath: String, + contentType: ContentType = ContentTypes.`application/octet-stream`, + contentLength: Long, + payload: Source[ByteString, _], + blobType: String = "BlockBlob", + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.putBlob(blobType, objectPath, contentType, contentLength, payload, leaseId) + + /** + * Creates a file. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` + * @param contentType content type of the blob + * @param maxSize maximum size of the file + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def createFile(objectPath: String, + contentType: ContentType = ContentTypes.`application/octet-stream`, + maxSize: Long, + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.createFile(objectPath, contentType, maxSize, leaseId) + + /** + * Updates file on the specified range. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` + * @param contentType content type of the blob + * @param range range of bytes to be written + * @param payload actual payload, a [[akka.stream.scaladsl.Source Source]] of [[akka.util.ByteString ByteString]] + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def updateRange(objectPath: String, + contentType: ContentType = ContentTypes.`application/octet-stream`, + range: ByteRange.Slice, + payload: Source[ByteString, _], + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed.type] = + AzureStorageStream.updateOrClearRange(objectPath, contentType, range, Some(payload), leaseId) + + /** + * Clears specified range from the file. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` + * @param range range of bytes to be cleared + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def clearRange(objectPath: String, + range: ByteRange.Slice, + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed.type] = + AzureStorageStream.updateOrClearRange(objectPath, ContentTypes.NoContentType, range, None, leaseId) +} From f1de2093bb2944380a6897119c8016fe7a361042 Mon Sep 17 00:00:00 2001 From: sfali Date: Sat, 24 Aug 2024 11:34:20 -0400 Subject: [PATCH 09/78] Introducing Akka stream attributes in order to configure http request #3253 --- .../azure/storage/StorageAttributes.scala | 38 +++++++++++++++++ .../alpakka/azure/storage/StorageExt.scala | 42 +++++++++++++++++++ 2 files changed, 80 insertions(+) create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageExt.scala diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala new file mode 100644 index 0000000000..ddddab1cdc --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala @@ -0,0 +1,38 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage + +import akka.stream.Attributes +import akka.stream.Attributes.Attribute + +/** + * Akka Stream attributes that are used when materializing AzureStorage stream blueprints. + */ +object StorageAttributes { + + /** + * Settings to use for the S3 stream + */ + def settings(settings: StorageSettings): Attributes = Attributes(StorageSettingsValue(settings)) + + /** + * Config path which will be used to resolve required AzureStorage settings + */ + def settingsPath(path: String): Attributes = Attributes(StorageSettingsPath(path)) +} + +final class StorageSettingsPath private (val path: String) extends Attribute +object StorageSettingsPath { + val Default: StorageSettingsPath = StorageSettingsPath(StorageSettings.ConfigPath) + + def apply(path: String) = new StorageSettingsPath(path) +} + +final class StorageSettingsValue private (val settings: StorageSettings) extends Attribute +object StorageSettingsValue { + def apply(settings: StorageSettings) = new StorageSettingsValue(settings) +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageExt.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageExt.scala new file mode 100644 index 0000000000..7730aa964c --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageExt.scala @@ -0,0 +1,42 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage + +import akka.actor.{ + ActorSystem, + ClassicActorSystemProvider, + ExtendedActorSystem, + Extension, + ExtensionId, + ExtensionIdProvider +} + +/** + * Manages one [[StorageSettings]] per `ActorSystem`. + */ +final class StorageExt private (sys: ExtendedActorSystem) extends Extension { + val settings: StorageSettings = settings(StorageSettings.ConfigPath) + + def settings(prefix: String): StorageSettings = StorageSettings(sys.settings.config.getConfig(prefix)) +} + +object StorageExt extends ExtensionId[StorageExt] with ExtensionIdProvider { + override def lookup: StorageExt.type = StorageExt + override def createExtension(system: ExtendedActorSystem) = new StorageExt(system) + + /** + * Java API. + * Get the Storage extension with the classic actors API. + */ + override def get(system: ActorSystem): StorageExt = super.apply(system) + + /** + * Java API. + * Get the Storage extension with the new actors API. + */ + override def get(system: ClassicActorSystemProvider): StorageExt = super.apply(system) +} From 426b80046d1dc7a496bf2598a378db5bf5d1e325 Mon Sep 17 00:00:00 2001 From: sfali Date: Sat, 24 Aug 2024 11:34:48 -0400 Subject: [PATCH 10/78] Resolve settings from Akka stream attributes #3253 --- .../storage/impl/AzureStorageStream.scala | 38 ++++++++++++------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index b0fa5fafca..fc1e02bc68 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -33,7 +33,7 @@ import akka.http.scaladsl.model.{ Uri } import akka.http.scaladsl.unmarshalling.Unmarshal -import akka.stream.Materializer +import akka.stream.{Attributes, Materializer} import akka.stream.alpakka.azure.storage.impl.auth.Signer import akka.stream.scaladsl.{Flow, RetryFlow, Source} import akka.util.ByteString @@ -50,9 +50,9 @@ object AzureStorageStream { versionId: Option[String], leaseId: Option[String]): Source[ByteString, Future[ObjectMetadata]] = { Source - .fromMaterializer { (mat, _) => + .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system - val settings = StorageSettings(system) + val settings = resolveSettings(attr, system) val request = createRequest( method = HttpMethods.GET, @@ -86,10 +86,10 @@ object AzureStorageStream { versionId: Option[String], leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed] = { Source - .fromMaterializer { (mat, _) => + .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system import mat.executionContext - val settings = StorageSettings(system) + val settings = resolveSettings(attr, system) val request = createRequest( method = HttpMethods.HEAD, @@ -119,10 +119,10 @@ object AzureStorageStream { versionId: Option[String], leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed] = { Source - .fromMaterializer { (mat, _) => + .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system import mat.executionContext - val settings = StorageSettings(system) + val settings = resolveSettings(attr, system) val request = createRequest( method = HttpMethods.DELETE, @@ -154,9 +154,9 @@ object AzureStorageStream { payload: Source[ByteString, _], leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed] = { Source - .fromMaterializer { (mat, _) => + .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system - val settings = StorageSettings(system) + val settings = resolveSettings(attr, system) val httpEntity = HttpEntity(contentType, contentLength, payload) val request = createRequest( @@ -176,9 +176,9 @@ object AzureStorageStream { maxSize: Long, leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed] = { Source - .fromMaterializer { (mat, _) => + .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system - val settings = StorageSettings(system) + val settings = resolveSettings(attr, system) val request = createRequest( HttpMethods.PUT, @@ -202,9 +202,9 @@ object AzureStorageStream { payload: Option[Source[ByteString, _]], leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed.type] = { Source - .fromMaterializer { (mat, _) => + .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system - val settings = StorageSettings(system) + val settings = resolveSettings(attr, system) val contentLength = range.last - range.first + 1 val clearRange = payload.isEmpty val writeType = if (clearRange) "clear" else "update" @@ -365,6 +365,18 @@ object AzureStorageStream { queryString = queryString) } + private def resolveSettings(attr: Attributes, sys: ActorSystem) = + attr + .get[StorageSettingsValue] + .map(_.settings) + .getOrElse { + val storageExtension = StorageExt(sys) + attr + .get[StorageSettingsPath] + .map(settingsPath => storageExtension.settings(settingsPath.path)) + .getOrElse(storageExtension.settings) + } + // `Content-Type` header is by design not accessible as header. So need to have a custom // header implementation to expose that private case class CustomContentTypeHeader(contentType: ContentType) extends CustomHeader { From e41869e3080150d87125bd500d6e3dd2a5826211 Mon Sep 17 00:00:00 2001 From: sfali Date: Sat, 24 Aug 2024 12:24:00 -0400 Subject: [PATCH 11/78] Added support for SharedKeyLite authorization #3253 --- .../azure/storage/impl/auth/Signer.scala | 66 ++++++++++++------- 1 file changed, 44 insertions(+), 22 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala index c41c55dc5e..c28e016e87 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala @@ -37,44 +37,47 @@ final case class Signer(initialRequest: HttpRequest, settings: StorageSettings) mac } - def signedRequest: Source[HttpRequest, NotUsed] = - if (settings.authorizationType == "anon") Source.single(requestWithHeaders) + def signedRequest: Source[HttpRequest, NotUsed] = { + import Signer._ + val authorizationType = settings.authorizationType + if (authorizationType == "anon") Source.single(requestWithHeaders) else { - val contentLengthValue = getHeaderValue(`Content-Length`.name, "0") - val contentLength = if (contentLengthValue == "0") "" else contentLengthValue - - val headersToSign = Seq( - requestWithHeaders.method.value.toUpperCase, - getHeaderValue(`Content-Encoding`.name, ""), - getHeaderValue("Content-Language", ""), - contentLength, - getHeaderValue("Content-MD5", ""), - getHeaderValue(`Content-Type`.name, ""), - "", // date, we are using `x-ms-date` instead - getHeaderValue(`If-Modified-Since`.name, ""), - getHeaderValue(`If-Match`.name, ""), - getHeaderValue(`If-None-Match`.name, ""), - getHeaderValue(`If-Unmodified-Since`.name, ""), - getHeaderValue(Range.name, "") - ) ++ getAdditionalXmsHeaders ++ getCanonicalizedResource + val headersToSign = + if (authorizationType == "SharedKeyLite") buildHeadersToSign(SharedKeyLiteHeaders) + else buildHeadersToSign(SharedKeyHeaders) val signature = sign(headersToSign) - Source.single( requestWithHeaders.addHeader( - RawHeader(AuthorizationHeaderKey, s"${settings.authorizationType} ${credential.accountName}:$signature") + RawHeader(AuthorizationHeaderKey, s"$authorizationType ${credential.accountName}:$signature") ) ) } + } private def getHeaderOptionalValue(headerName: String) = requestWithHeaders.headers.collectFirst { case header if header.name() == headerName => header.value() } - private def getHeaderValue(headerName: String, defaultValue: String) = + private def getHeaderValue(headerName: String, defaultValue: String = "") = getHeaderOptionalValue(headerName).getOrElse(defaultValue) + private def getContentLengthValue = { + val contentLengthValue = getHeaderValue(`Content-Length`.name, "0") + if (contentLengthValue == "0") "" else contentLengthValue + } + + private def buildHeadersToSign(headerNames: Seq[String]) = { + def getValue(headerName: String) = { + if (headerName == `Content-Length`.name) getContentLengthValue + else getHeaderValue(headerName) + } + + Seq(requestWithHeaders.method.value.toUpperCase) ++ headerNames.map(getValue) ++ + getAdditionalXmsHeaders ++ getCanonicalizedResource + } + private def getAdditionalXmsHeaders = requestWithHeaders.headers.filter(header => header.name().startsWith("x-ms-")).sortBy(_.name()).map { header => s"${header.name()}:${header.value()}" @@ -100,3 +103,22 @@ final case class Signer(initialRequest: HttpRequest, settings: StorageSettings) private def sign(headersToSign: Seq[String]) = Base64.getEncoder.encodeToString(mac.doFinal(headersToSign.mkString(NewLine).getBytes)) } + +object Signer { + private val SharedKeyHeaders = + Seq( + `Content-Encoding`.name, + "Content-Language", + `Content-Length`.name, + "Content-MD5", + `Content-Type`.name, + Date.name, + `If-Modified-Since`.name, + `If-Match`.name, + `If-None-Match`.name, + `If-Unmodified-Since`.name, + Range.name + ) + + private val SharedKeyLiteHeaders = Seq("Content-MD5", `Content-Type`.name, Date.name) +} From 5d3b55d6cd3c42321212fecd6c2fd0d10faf040d Mon Sep 17 00:00:00 2001 From: sfali Date: Sat, 24 Aug 2024 17:09:06 -0400 Subject: [PATCH 12/78] Added support for SAS authorization #3253 --- .../src/main/resources/reference.conf | 10 ++++-- .../azure/storage/impl/auth/Signer.scala | 4 +-- .../alpakka/azure/storage/settings.scala | 31 ++++++++++++++++--- 3 files changed, 36 insertions(+), 9 deletions(-) diff --git a/azure-storage/src/main/resources/reference.conf b/azure-storage/src/main/resources/reference.conf index 3316383349..eb7f687232 100644 --- a/azure-storage/src/main/resources/reference.conf +++ b/azure-storage/src/main/resources/reference.conf @@ -5,15 +5,21 @@ alpakka { signing-algorithm = "HmacSHA256" credentials { - # valid values are anon (annonymous), SharedKey, SharedKeyLite, SAS + # valid values are anon (annonymous), SharedKey, SharedKeyLite, sas authorization-type = anon authorization-type = ${?AZURE_STORAGE_AUTHORIZATION_TYPE} - # following + # required for all authorization types account-name = none account-name = ${?AZURE_STORAGE_ACCOUNT_NAME} + + # Account key is required for SharedKey or SharedKeyLite authorization account-key = none account-key = ${?AZURE_STORAGE_ACCOUNT_KEY} + + # SAS token for sas authorization + sas-token = "" + sas-token = ${?AZURE_STORAGE_SAS_TOKEN} } # Default settings corresponding to automatic retry of requests in an S3 stream. diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala index c28e016e87..3377eec1a8 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala @@ -31,7 +31,7 @@ final case class Signer(initialRequest: HttpRequest, settings: StorageSettings) .addHeader(RawHeader(XmsDateHeaderKey, getFormattedDate)) .addHeader(RawHeader(XmsVersionHeaderKey, settings.apiVersion)) - private val mac = { + private lazy val mac = { val mac = Mac.getInstance(settings.algorithm) mac.init(new SecretKeySpec(credential.accountKey, settings.algorithm)) mac @@ -40,7 +40,7 @@ final case class Signer(initialRequest: HttpRequest, settings: StorageSettings) def signedRequest: Source[HttpRequest, NotUsed] = { import Signer._ val authorizationType = settings.authorizationType - if (authorizationType == "anon") Source.single(requestWithHeaders) + if (authorizationType == "anon" || authorizationType == "sas") Source.single(requestWithHeaders) else { val headersToSign = if (authorizationType == "SharedKeyLite") buildHeadersToSign(SharedKeyLiteHeaders) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala index 2960652ee8..3326a662a1 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala @@ -10,13 +10,15 @@ import akka.actor.ClassicActorSystemProvider import com.typesafe.config.Config import java.time.{Duration => JavaDuration} -import java.util.Objects +import java.util.{Objects, Optional} import java.util.concurrent.TimeUnit import scala.concurrent.duration._ +import scala.jdk.OptionConverters._ final class StorageSettings(val apiVersion: String, val authorizationType: String, val azureNameKeyCredential: AzureNameKeyCredential, + val sasToken: Option[String], val retrySettings: RetrySettings, val algorithm: String) { @@ -29,6 +31,9 @@ final class StorageSettings(val apiVersion: String, /** Java API */ def getAzureNameKeyCredential: AzureNameKeyCredential = azureNameKeyCredential + /** Java API */ + def getSasToken: Optional[String] = sasToken.toJava + /** Java API */ def getRetrySettings: RetrySettings = retrySettings @@ -45,6 +50,9 @@ final class StorageSettings(val apiVersion: String, def withAzureNameKeyCredential(azureNameKeyCredential: AzureNameKeyCredential): StorageSettings = copy(azureNameKeyCredential = azureNameKeyCredential) + /** Java API */ + def withSasToken(sasToken: String): StorageSettings = copy(sasToken = emptyStringToOption(sasToken)) + /** Java API */ def withRetrySettings(retrySettings: RetrySettings): StorageSettings = copy(retrySettings = retrySettings) @@ -56,6 +64,7 @@ final class StorageSettings(val apiVersion: String, | apiVersion=$apiVersion, | authorizationType=$authorizationType, | azureNameKeyCredential=$azureNameKeyCredential, + | sasToken=$sasToken | retrySettings=$retrySettings, | algorithm=$algorithm |)""".stripMargin.replaceAll(System.lineSeparator(), "") @@ -65,6 +74,7 @@ final class StorageSettings(val apiVersion: String, apiVersion == that.apiVersion && authorizationType == that.authorizationType && Objects.equals(azureNameKeyCredential, that.azureNameKeyCredential) && + sasToken == that.sasToken && Objects.equals(retrySettings, that.retrySettings) && algorithm == that.algorithm @@ -72,16 +82,17 @@ final class StorageSettings(val apiVersion: String, } override def hashCode(): Int = - Objects.hash(apiVersion, authorizationType, azureNameKeyCredential, retrySettings, algorithm) + Objects.hash(apiVersion, authorizationType, azureNameKeyCredential, sasToken, retrySettings, algorithm) private def copy( apiVersion: String = apiVersion, authorizationType: String = authorizationType, azureNameKeyCredential: AzureNameKeyCredential = azureNameKeyCredential, + sasToken: Option[String] = sasToken, retrySettings: RetrySettings = retrySettings, algorithm: String = algorithm ) = - StorageSettings(apiVersion, authorizationType, azureNameKeyCredential, retrySettings, algorithm) + StorageSettings(apiVersion, authorizationType, azureNameKeyCredential, sasToken, retrySettings, algorithm) } object StorageSettings { @@ -91,31 +102,41 @@ object StorageSettings { apiVersion: String, authorizationType: String, azureNameKeyCredential: AzureNameKeyCredential, + sasToken: Option[String], retrySettings: RetrySettings, algorithm: String ): StorageSettings = - new StorageSettings(apiVersion, authorizationType, azureNameKeyCredential, retrySettings, algorithm) + new StorageSettings(apiVersion, authorizationType, azureNameKeyCredential, sasToken, retrySettings, algorithm) /** Java API */ def create( apiVersion: String, authorizationType: String, azureNameKeyCredential: AzureNameKeyCredential, + sasToken: Optional[String], retrySettings: RetrySettings, algorithm: String ): StorageSettings = - StorageSettings(apiVersion, authorizationType, azureNameKeyCredential, retrySettings, algorithm) + StorageSettings(apiVersion, + authorizationType, + azureNameKeyCredential, + Option(sasToken.orElse(null)), + retrySettings, + algorithm) def apply(config: Config): StorageSettings = { val apiVersion = config.getString("api-version") val credentials = config.getConfig("credentials") val authorizationType = credentials.getString("authorization-type") + val sasToken = + if (credentials.hasPath("sas-token")) emptyStringToOption(credentials.getString("sas-token")) else None val algorithm = config.getString("signing-algorithm") StorageSettings( apiVersion = apiVersion, authorizationType = authorizationType, azureNameKeyCredential = AzureNameKeyCredential(credentials), + sasToken = sasToken, retrySettings = RetrySettings(config.getConfig("retry-settings")), algorithm = algorithm ) From dcc9c2166da58d60c64997b87a3a725e0593480b Mon Sep 17 00:00:00 2001 From: sfali Date: Sat, 24 Aug 2024 17:10:06 -0400 Subject: [PATCH 13/78] use option for null String #3253 --- .../main/scala/akka/stream/alpakka/azure/storage/package.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala index e1d049b843..028b25d079 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala @@ -36,5 +36,5 @@ package object storage { /** This method returns `None` if given an empty `String`. This is typically used when parsing XML since its common to * have XML elements with an empty text value inside. */ - private[storage] def emptyStringToOption(value: String): Option[String] = if (value == "") None else Some(value) + private[storage] def emptyStringToOption(value: String): Option[String] = if (value == "") None else Option(value) } From 582edb3b2fc464c62b552e6e6a787bed622f1dd4 Mon Sep 17 00:00:00 2001 From: sfali Date: Sat, 24 Aug 2024 17:10:49 -0400 Subject: [PATCH 14/78] function to get default settings #3253 --- .../stream/alpakka/azure/storage/StorageAttributes.scala | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala index ddddab1cdc..b5890bdac6 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala @@ -23,6 +23,11 @@ object StorageAttributes { * Config path which will be used to resolve required AzureStorage settings */ def settingsPath(path: String): Attributes = Attributes(StorageSettingsPath(path)) + + /** + * Default settings + */ + def defaultSettings: Attributes = Attributes(StorageSettingsPath.Default) } final class StorageSettingsPath private (val path: String) extends Attribute From 7dd77c674a096199b4e519153a136efb973b0874 Mon Sep 17 00:00:00 2001 From: sfali Date: Sat, 24 Aug 2024 17:12:27 -0400 Subject: [PATCH 15/78] Support for SAS authorization #3253 1. Create query string with SAS token 2. Initialize query string --- .../storage/impl/AzureStorageStream.scala | 38 ++++++++++++------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index fc1e02bc68..9d87f872d8 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -59,7 +59,7 @@ object AzureStorageStream { accountName = settings.azureNameKeyCredential.accountName, storageType = storageType, objectPath = objectPath, - queryString = versionId.map(value => s"versionId=$value"), + queryString = createQueryString(settings, versionId.map(value => s"versionId=$value")), headers = populateCommonHeaders(HttpEntity.Empty, range = range, leaseId = leaseId) ) val objectMetadataMat = Promise[ObjectMetadata]() @@ -96,7 +96,7 @@ object AzureStorageStream { accountName = settings.azureNameKeyCredential.accountName, storageType = storageType, objectPath = objectPath, - queryString = versionId.map(value => s"versionId=$value"), + queryString = createQueryString(settings, versionId.map(value => s"versionId=$value")), headers = populateCommonHeaders(HttpEntity.Empty, leaseId = leaseId) ) @@ -129,7 +129,7 @@ object AzureStorageStream { accountName = settings.azureNameKeyCredential.accountName, storageType = storageType, objectPath = objectPath, - queryString = versionId.map(value => s"versionId=$value"), + queryString = createQueryString(settings, versionId.map(value => s"versionId=$value")), headers = populateCommonHeaders(HttpEntity.Empty, leaseId = leaseId) ) @@ -160,10 +160,11 @@ object AzureStorageStream { val httpEntity = HttpEntity(contentType, contentLength, payload) val request = createRequest( - HttpMethods.PUT, - settings.azureNameKeyCredential.accountName, - BlobType, - objectPath, + method = HttpMethods.PUT, + accountName = settings.azureNameKeyCredential.accountName, + storageType = BlobType, + objectPath = objectPath, + queryString = createQueryString(settings), headers = populateCommonHeaders(httpEntity, blobType = Some(blobType), leaseId = leaseId) ).withEntity(httpEntity) handlePutRequest(request, settings) @@ -181,10 +182,11 @@ object AzureStorageStream { val settings = resolveSettings(attr, system) val request = createRequest( - HttpMethods.PUT, - settings.azureNameKeyCredential.accountName, - FileType, - objectPath, + method = HttpMethods.PUT, + accountName = settings.azureNameKeyCredential.accountName, + storageType = FileType, + objectPath = objectPath, + queryString = createQueryString(settings), headers = Seq( CustomContentTypeHeader(contentType), RawHeader(XMsContentLengthHeaderKey, maxSize.toString), @@ -217,7 +219,7 @@ object AzureStorageStream { accountName = settings.azureNameKeyCredential.accountName, storageType = FileType, objectPath = objectPath, - queryString = Some("comp=range"), + queryString = createQueryString(settings, Some("comp=range")), headers = populateCommonHeaders(httpEntity, overrideContentLength, range = Some(range), @@ -354,7 +356,7 @@ object AzureStorageStream { accountName: String, storageType: String, objectPath: String, - queryString: Option[String] = None, + queryString: Option[String], headers: Seq[HttpHeader]) = HttpRequest(method = method, uri = createUri(accountName, storageType, objectPath, queryString), headers = headers) @@ -365,6 +367,16 @@ object AzureStorageStream { queryString = queryString) } + private def createQueryString(settings: StorageSettings, apiQueryString: Option[String] = None) = { + if (settings.authorizationType == "sas") { + if (settings.sasToken.isEmpty) throw new RuntimeException("SAS token must be defined for SAS authorization type.") + else { + val sasToken = settings.sasToken.get + Some(apiQueryString.map(qs => s"$sasToken&$qs").getOrElse(sasToken)) + } + } else apiQueryString + } + private def resolveSettings(attr: Attributes, sys: ActorSystem) = attr .get[StorageSettingsValue] From d29f3f9401fcef9cfed2d2d0fa5857a5ff69fe50 Mon Sep 17 00:00:00 2001 From: sfali Date: Sat, 24 Aug 2024 17:17:52 -0400 Subject: [PATCH 16/78] Splitting service operations based on service type #3253 --- .../azure/storage/javadsl/BlobService.scala | 127 ++++++++++++++++++ .../{AzureStorage.scala => FileService.scala} | 110 +-------------- 2 files changed, 132 insertions(+), 105 deletions(-) create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala rename azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/{AzureStorage.scala => FileService.scala} (59%) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala new file mode 100644 index 0000000000..33d600cbdf --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala @@ -0,0 +1,127 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package javadsl + +import akka.NotUsed +import akka.http.javadsl.model._ +import akka.http.javadsl.model.headers.ByteRange +import akka.http.scaladsl.model.headers.{ByteRange => ScalaByteRange} +import akka.http.scaladsl.model.{ContentType => ScalaContentType} +import akka.stream.alpakka.azure.storage.impl.AzureStorageStream +import akka.stream.javadsl.Source +import akka.stream.scaladsl.SourceToCompletionStage +import akka.util.ByteString + +import java.util.Optional +import java.util.concurrent.CompletionStage + +/** + * Java API for BlobService operations. + */ +object BlobService { + + /** + * Gets blob representing `objectPath` with specified range (if applicable). + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param range range to download + * @param versionId versionId of the blob (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a + * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] + */ + def getBlob(objectPath: String, + range: ByteRange, + versionId: Optional[String], + leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = { + val scalaRange = range.asInstanceOf[ScalaByteRange] + new Source( + AzureStorageStream + .getObject(BlobType, objectPath, Some(scalaRange), Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .toCompletionStage() + ) + } + + /** + * Gets blob representing `objectPath` with specified range (if applicable). + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param versionId versionId of the blob (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a + * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] + */ + def getBlob(objectPath: String, + versionId: Optional[String], + leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = + new Source( + AzureStorageStream + .getObject(BlobType, objectPath, None, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .toCompletionStage() + ) + + /** + * Gets blob properties. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param versionId versionId of the blob (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def getProperties(objectPath: String, + versionId: Optional[String], + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + AzureStorageStream + .getObjectProperties(BlobType, objectPath, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .map(opt => Optional.ofNullable(opt.orNull)) + .asJava + + /** + * Deletes blob. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param versionId versionId of the blob (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def deleteBlob(objectPath: String, + versionId: Optional[String], + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + AzureStorageStream + .deleteObject(BlobType, objectPath, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .map(opt => Optional.ofNullable(opt.orNull)) + .asJava + + /** + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param contentType content type of the blob + * @param contentLength length of the blob + * @param payload actual payload, a [[akka.stream.javadsl.Source Source]] of [[akka.util.ByteString ByteString]] + * @param blobType type of the blob, ''Must be one of:'' __'''BlockBlob, PageBlob, or AppendBlob'''__ + * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def putBlob(objectPath: String, + contentType: ContentType, + contentLength: Long, + payload: Source[ByteString, _], + blobType: String = "BlockBlob", + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + AzureStorageStream + .putBlob(blobType, + objectPath, + contentType.asInstanceOf[ScalaContentType], + contentLength, + payload.asScala, + Option(leaseId.orElse(null))) + .map(opt => Optional.ofNullable(opt.orNull)) + .asJava +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/AzureStorage.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala similarity index 59% rename from azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/AzureStorage.scala rename to azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala index eb714e91ed..53a87f6259 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/AzureStorage.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala @@ -22,49 +22,9 @@ import java.util.Optional import java.util.concurrent.CompletionStage /** - * Java API + * Java API FileService operations */ -object AzureStorage { - - /** - * Gets blob representing `objectPath` with specified range (if applicable). - * - * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param range range to download - * @param versionId versionId of the blob (if applicable) - * @param leaseId lease ID of an active lease (if applicable) - * @return A [[akka.stream.javadsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a - * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] - */ - def getBlob(objectPath: String, - range: ByteRange, - versionId: Optional[String], - leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = { - val scalaRange = range.asInstanceOf[ScalaByteRange] - new Source( - AzureStorageStream - .getObject(BlobType, objectPath, Some(scalaRange), Option(versionId.orElse(null)), Option(leaseId.orElse(null))) - .toCompletionStage() - ) - } - - /** - * Gets blob representing `objectPath` with specified range (if applicable). - * - * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param versionId versionId of the blob (if applicable) - * @param leaseId lease ID of an active lease (if applicable) - * @return A [[akka.stream.javadsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a - * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] - */ - def getBlob(objectPath: String, - versionId: Optional[String], - leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = - new Source( - AzureStorageStream - .getObject(BlobType, objectPath, None, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) - .toCompletionStage() - ) +object FileService { /** * Gets file representing `objectPath` with specified range (if applicable). @@ -107,23 +67,6 @@ object AzureStorage { ) } - /** - * Gets blob properties. - * - * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param versionId versionId of the blob (if applicable) - * @param leaseId lease ID of an active lease (if applicable) - * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of - * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist - */ - def getBlobProperties(objectPath: String, - versionId: Optional[String], - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = - AzureStorageStream - .getObjectProperties(BlobType, objectPath, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) - .map(opt => Optional.ofNullable(opt.orNull)) - .asJava - /** * Gets file properties. * @@ -133,31 +76,14 @@ object AzureStorage { * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def getFileProperties(objectPath: String, - versionId: Optional[String], - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + def getProperties(objectPath: String, + versionId: Optional[String], + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream .getObjectProperties(FileType, objectPath, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) .map(opt => Optional.ofNullable(opt.orNull)) .asJava - /** - * Deletes blob. - * - * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param versionId versionId of the blob (if applicable) - * @param leaseId lease ID of an active lease (if applicable) - * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of - * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist - */ - def deleteBlob(objectPath: String, - versionId: Optional[String], - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = - AzureStorageStream - .deleteObject(BlobType, objectPath, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) - .map(opt => Optional.ofNullable(opt.orNull)) - .asJava - /** * Deletes file. * @@ -175,32 +101,6 @@ object AzureStorage { .map(opt => Optional.ofNullable(opt.orNull)) .asJava - /** - * - * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param contentType content type of the blob - * @param contentLength length of the blob - * @param payload actual payload, a [[akka.stream.javadsl.Source Source]] of [[akka.util.ByteString ByteString]] - * @param blobType type of the blob, ''Must be one of:'' __'''BlockBlob, PageBlob, or AppendBlob'''__ - * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of - * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist - */ - def putBlob(objectPath: String, - contentType: ContentType, - contentLength: Long, - payload: Source[ByteString, _], - blobType: String = "BlockBlob", - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = - AzureStorageStream - .putBlob(blobType, - objectPath, - contentType.asInstanceOf[ScalaContentType], - contentLength, - payload.asScala, - Option(leaseId.orElse(null))) - .map(opt => Optional.ofNullable(opt.orNull)) - .asJava - /** * Creates a file. * From 997746aeb0908fa9056dcc32be57caa30d3b1626 Mon Sep 17 00:00:00 2001 From: sfali Date: Sat, 24 Aug 2024 17:21:06 -0400 Subject: [PATCH 17/78] Splitting service operations based on service type #3253 --- .../azure/storage/scaladsl/BlobService.scala | 85 +++++++++++++++++++ .../{AzureStorage.scala => FileService.scala} | 72 ++-------------- 2 files changed, 90 insertions(+), 67 deletions(-) create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala rename azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/{AzureStorage.scala => FileService.scala} (59%) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala new file mode 100644 index 0000000000..455b315ace --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala @@ -0,0 +1,85 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package scaladsl + +import akka.NotUsed +import akka.http.scaladsl.model.{ContentType, ContentTypes} +import akka.http.scaladsl.model.headers.ByteRange +import akka.stream.alpakka.azure.storage.impl.AzureStorageStream +import akka.stream.scaladsl.Source +import akka.util.ByteString + +import scala.concurrent.Future + +/** + * Scala API for BlobService operations. + */ +object BlobService { + + /** + * Gets blob representing `objectPath` with specified range (if applicable). + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param range range to download + * @param versionId versionId of the blob (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.scaladsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a + * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] + */ + def getBlob(objectPath: String, + range: Option[ByteRange] = None, + versionId: Option[String] = None, + leaseId: Option[String] = None): Source[ByteString, Future[ObjectMetadata]] = + AzureStorageStream.getObject(BlobType, objectPath, range, versionId, leaseId) + + /** + * Gets blob properties. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param versionId versionId of the blob (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def getProperties(objectPath: String, + versionId: Option[String] = None, + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.getObjectProperties(BlobType, objectPath, versionId, leaseId) + + /** + * Deletes blob. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param versionId versionId of the blob (if applicable) + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def deleteBlob(objectPath: String, + versionId: Option[String] = None, + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.deleteObject(BlobType, objectPath, versionId, leaseId) + + /** + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param contentType content type of the blob + * @param contentLength length of the blob + * @param payload actual payload, a [[akka.stream.scaladsl.Source Source]] of [[akka.util.ByteString ByteString]] + * @param blobType type of the blob, ''Must be one of:'' __'''BlockBlob, PageBlob, or AppendBlob'''__ + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def putBlob(objectPath: String, + contentType: ContentType = ContentTypes.`application/octet-stream`, + contentLength: Long, + payload: Source[ByteString, _], + blobType: String = "BlockBlob", + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.putBlob(blobType, objectPath, contentType, contentLength, payload, leaseId) +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureStorage.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala similarity index 59% rename from azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureStorage.scala rename to azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala index a67f4aeed2..344cce4f8c 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureStorage.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala @@ -17,25 +17,9 @@ import akka.util.ByteString import scala.concurrent.Future /** - * Scala API + * Scala API for FileService operations. */ -object AzureStorage { - - /** - * Gets blob representing `objectPath` with specified range (if applicable). - * - * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param range range to download - * @param versionId versionId of the blob (if applicable) - * @param leaseId lease ID of an active lease (if applicable) - * @return A [[akka.stream.scaladsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a - * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] - */ - def getBlob(objectPath: String, - range: Option[ByteRange] = None, - versionId: Option[String] = None, - leaseId: Option[String] = None): Source[ByteString, Future[ObjectMetadata]] = - AzureStorageStream.getObject(BlobType, objectPath, range, versionId, leaseId) +object FileService { /** * Gets file representing `objectPath` with specified range (if applicable). @@ -53,20 +37,6 @@ object AzureStorage { leaseId: Option[String] = None): Source[ByteString, Future[ObjectMetadata]] = AzureStorageStream.getObject(FileType, objectPath, range, versionId, leaseId) - /** - * Gets blob properties. - * - * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param versionId versionId of the blob (if applicable) - * @param leaseId lease ID of an active lease (if applicable) - * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of - * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist - */ - def getBlobProperties(objectPath: String, - versionId: Option[String] = None, - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.getObjectProperties(BlobType, objectPath, versionId, leaseId) - /** * Gets file properties. * @@ -76,25 +46,11 @@ object AzureStorage { * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def getFileProperties(objectPath: String, - versionId: Option[String] = None, - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = + def getProperties(objectPath: String, + versionId: Option[String] = None, + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = AzureStorageStream.getObjectProperties(FileType, objectPath, versionId, leaseId) - /** - * Deletes blob. - * - * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param versionId versionId of the blob (if applicable) - * @param leaseId lease ID of an active lease (if applicable) - * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of - * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist - */ - def deleteBlob(objectPath: String, - versionId: Option[String] = None, - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.deleteObject(BlobType, objectPath, versionId, leaseId) - /** * Deletes file. * @@ -109,24 +65,6 @@ object AzureStorage { leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = AzureStorageStream.deleteObject(FileType, objectPath, versionId, leaseId) - /** - * - * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param contentType content type of the blob - * @param contentLength length of the blob - * @param payload actual payload, a [[akka.stream.scaladsl.Source Source]] of [[akka.util.ByteString ByteString]] - * @param blobType type of the blob, ''Must be one of:'' __'''BlockBlob, PageBlob, or AppendBlob'''__ - * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of - * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist - */ - def putBlob(objectPath: String, - contentType: ContentType = ContentTypes.`application/octet-stream`, - contentLength: Long, - payload: Source[ByteString, _], - blobType: String = "BlockBlob", - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.putBlob(blobType, objectPath, contentType, contentLength, payload, leaseId) - /** * Creates a file. * From 34832f64a23c49738defafdfed49c5bd70d4428f Mon Sep 17 00:00:00 2001 From: sfali Date: Sat, 24 Aug 2024 21:16:01 -0400 Subject: [PATCH 18/78] StorageException test #3253 --- .../azure/storage/StorageException.scala | 14 +-- .../azure/storage/StorageExceptionSpec.scala | 93 +++++++++++++++++++ 2 files changed, 101 insertions(+), 6 deletions(-) create mode 100644 azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageExceptionSpec.scala diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageException.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageException.scala index 37b203be23..20eb706a32 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageException.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageException.scala @@ -64,12 +64,14 @@ object StorageException { ) } match { case Failure(ex) => - StorageException(statusCode = statusCode, - errorCode = ex.getMessage, - errorMessage = response, - resourceName = None, - resourceValue = None, - reason = None) + StorageException( + statusCode = statusCode, + errorCode = Option(ex.getMessage).getOrElse("null"), + errorMessage = Option(response).getOrElse("null"), + resourceName = None, + resourceValue = None, + reason = None + ) case Success(value) => value } diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageExceptionSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageExceptionSpec.scala new file mode 100644 index 0000000000..3fc450f384 --- /dev/null +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageExceptionSpec.scala @@ -0,0 +1,93 @@ +package akka.stream.alpakka +package azure +package storage + +import akka.http.scaladsl.model.StatusCodes +import org.scalatest.flatspec.AnyFlatSpecLike +import org.scalatest.matchers.should.Matchers + +class StorageExceptionSpec extends AnyFlatSpecLike with Matchers { + + "Storage exception" should "be parsed" in { + val ex = StorageException("invalid xml", StatusCodes.NotFound) + ex.toString shouldBe """StorageException(statusCode=404 Not Found, errorCode=Content is not allowed in prolog., + | errorMessage=invalid xml, resourceName=None, resourceValue=None, reason=None) + |""".stripMargin.replaceAll(System.lineSeparator(), "") + } + + it should "parse XML containing error related to query" in { + val xml = + """ + | + | InvalidQueryParameterValue + | Value for one of the query parameters specified in the request URI is invalid. + | popreceipt + | 33537277-6a52-4a2b-b4eb-0f905051827b + | invalid receipt format + | + |""".stripMargin + + StorageException(xml, StatusCodes.BadRequest) shouldBe StorageException( + statusCode = StatusCodes.BadRequest, + errorCode = "InvalidQueryParameterValue", + errorMessage = "Value for one of the query parameters specified in the request URI is invalid.", + resourceName = Some("popreceipt"), + resourceValue = Some("33537277-6a52-4a2b-b4eb-0f905051827b"), + reason = Some("invalid receipt format") + ) + } + + it should "parse XML containing error related to header" in { + val xml = + """ + | + | InvalidHeaderValue + | Invalid header + | popreceipt + | 33537277-6a52-4a2b-b4eb-0f905051827b + | invalid receipt format + | + |""".stripMargin + + StorageException(xml, StatusCodes.BadRequest) shouldBe StorageException( + statusCode = StatusCodes.BadRequest, + errorCode = "InvalidHeaderValue", + errorMessage = "Invalid header", + resourceName = Some("popreceipt"), + resourceValue = Some("33537277-6a52-4a2b-b4eb-0f905051827b"), + reason = Some("invalid receipt format") + ) + } + + it should "parse XML containing error related to authentication" in { + val xml = + """ + | + | InvalidAuthenticationInfo + | Authentication failed + | Server failed to authenticate the request. Please refer to the information in the www-authenticate header. + | + |""".stripMargin + + StorageException(xml, StatusCodes.Unauthorized) shouldBe StorageException( + statusCode = StatusCodes.Unauthorized, + errorCode = "InvalidAuthenticationInfo", + errorMessage = "Authentication failed", + resourceName = None, + resourceValue = None, + reason = Some( + "Server failed to authenticate the request. Please refer to the information in the www-authenticate header." + ) + ) + } + + it should "survive null" in { + StorageException(null, StatusCodes.ServiceUnavailable) shouldBe + StorageException(statusCode = StatusCodes.ServiceUnavailable, + errorCode = "null", + errorMessage = "null", + resourceName = None, + resourceValue = None, + reason = None) + } +} From 6cc3254a1d43322f039ad7768117cf97b9707370 Mon Sep 17 00:00:00 2001 From: sfali Date: Sun, 25 Aug 2024 10:37:01 -0400 Subject: [PATCH 19/78] creating constants for authorization types #3253 --- .../alpakka/azure/storage/impl/AzureStorageStream.scala | 2 +- .../akka/stream/alpakka/azure/storage/impl/auth/Signer.scala | 5 +++-- .../scala/akka/stream/alpakka/azure/storage/package.scala | 4 ++++ 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index 9d87f872d8..775580ffc8 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -368,7 +368,7 @@ object AzureStorageStream { } private def createQueryString(settings: StorageSettings, apiQueryString: Option[String] = None) = { - if (settings.authorizationType == "sas") { + if (settings.authorizationType == SasAuthorizationType) { if (settings.sasToken.isEmpty) throw new RuntimeException("SAS token must be defined for SAS authorization type.") else { val sasToken = settings.sasToken.get diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala index 3377eec1a8..93869fda3f 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala @@ -40,10 +40,11 @@ final case class Signer(initialRequest: HttpRequest, settings: StorageSettings) def signedRequest: Source[HttpRequest, NotUsed] = { import Signer._ val authorizationType = settings.authorizationType - if (authorizationType == "anon" || authorizationType == "sas") Source.single(requestWithHeaders) + if (authorizationType == AnonymousAuthorizationType || authorizationType == SasAuthorizationType) + Source.single(requestWithHeaders) else { val headersToSign = - if (authorizationType == "SharedKeyLite") buildHeadersToSign(SharedKeyLiteHeaders) + if (authorizationType == SharedKeyLiteAuthorizationType) buildHeadersToSign(SharedKeyLiteHeaders) else buildHeadersToSign(SharedKeyHeaders) val signature = sign(headersToSign) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala index 028b25d079..b328fb15bb 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala @@ -19,6 +19,10 @@ package object storage { private[storage] val FileWriteTypeHeaderKey = "x-ms-write" private[storage] val XMsContentLengthHeaderKey = "x-ms-content-length" private[storage] val FileTypeHeaderKey = "x-ms-type" + private[storage] val AnonymousAuthorizationType = "anon" + private[storage] val SharedKeyAuthorizationType = "SharedKey" + private[storage] val SharedKeyLiteAuthorizationType = "SharedKeyLite" + private[storage] val SasAuthorizationType = "sas" private[storage] val BlobType = "blob" private[storage] val FileType = "file" From d51c06a624b202e92704967c8be511e06628c33c Mon Sep 17 00:00:00 2001 From: sfali Date: Sun, 25 Aug 2024 11:44:12 -0400 Subject: [PATCH 20/78] implicit class to handle validations #3253 --- .../akka/stream/alpakka/azure/storage/package.scala | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala index b328fb15bb..402660bbd8 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala @@ -5,6 +5,8 @@ package akka.stream.alpakka package azure +import com.typesafe.config.Config + import java.time.{ZoneOffset, ZonedDateTime} import java.time.format.DateTimeFormatter @@ -41,4 +43,14 @@ package object storage { * have XML elements with an empty text value inside. */ private[storage] def emptyStringToOption(value: String): Option[String] = if (value == "") None else Option(value) + + implicit private[storage] class ConfigOps(src: Config) { + + def getString(path: String, defaultValue: String): String = { + if (src.hasPath(path)) src.getString(path) else defaultValue + } + + def getOptionalString(path: String): Option[String] = + if (src.hasPath(path)) emptyStringToOption(src.getString(path)) else None + } } From 7b3ed577dd6badf8373ead52ec4e244891de1aed Mon Sep 17 00:00:00 2001 From: sfali Date: Sun, 25 Aug 2024 11:44:58 -0400 Subject: [PATCH 21/78] validations of configs and falling back to default values #3253 --- .../src/main/resources/reference.conf | 2 +- .../stream/alpakka/azure/storage/models.scala | 5 +- .../alpakka/azure/storage/settings.scala | 48 ++++++++++++------- 3 files changed, 35 insertions(+), 20 deletions(-) diff --git a/azure-storage/src/main/resources/reference.conf b/azure-storage/src/main/resources/reference.conf index eb7f687232..f80b13f535 100644 --- a/azure-storage/src/main/resources/reference.conf +++ b/azure-storage/src/main/resources/reference.conf @@ -10,7 +10,7 @@ alpakka { authorization-type = ${?AZURE_STORAGE_AUTHORIZATION_TYPE} # required for all authorization types - account-name = none + account-name = "" account-name = ${?AZURE_STORAGE_ACCOUNT_NAME} # Account key is required for SharedKey or SharedKeyLite authorization diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala index bf8e72aacd..a8528e70f6 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala @@ -25,8 +25,9 @@ object AzureNameKeyCredential { AzureNameKeyCredential(accountName, accountKey) def apply(config: Config): AzureNameKeyCredential = { - val accountName = config.getString("account-name") - val accountKey = config.getString("account-key") + val accountName = config.getString("account-name", "") + val accountKey = config.getString("account-key", "") + if (accountName.isEmpty) throw new RuntimeException("accountName property must be defined") AzureNameKeyCredential(accountName, accountKey) } } diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala index 3326a662a1..3dec2b33de 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala @@ -14,6 +14,7 @@ import java.util.{Objects, Optional} import java.util.concurrent.TimeUnit import scala.concurrent.duration._ import scala.jdk.OptionConverters._ +import scala.util.Try final class StorageSettings(val apiVersion: String, val authorizationType: String, @@ -97,6 +98,8 @@ final class StorageSettings(val apiVersion: String, object StorageSettings { private[storage] val ConfigPath = "alpakka.azure-storage" + private val AuthorizationTypes = + Seq(AnonymousAuthorizationType, SharedKeyAuthorizationType, SharedKeyLiteAuthorizationType, SasAuthorizationType) def apply( apiVersion: String, @@ -125,20 +128,27 @@ object StorageSettings { algorithm) def apply(config: Config): StorageSettings = { - val apiVersion = config.getString("api-version") - val credentials = config.getConfig("credentials") - val authorizationType = credentials.getString("authorization-type") - val sasToken = - if (credentials.hasPath("sas-token")) emptyStringToOption(credentials.getString("sas-token")) else None - val algorithm = config.getString("signing-algorithm") + val apiVersion = config.getString("api-version", "2024-11-04") + + val credentials = + if (config.hasPath("credentials")) config.getConfig("credentials") + else throw new RuntimeException("credentials must be defined.") + + val authorizationType = { + val value = credentials.getString("authorization-type", "anon") + if (AuthorizationTypes.contains(value)) value else AnonymousAuthorizationType + } + + val retrySettings = + if (config.hasPath("retry-settings")) RetrySettings(config.getConfig("retry-settings")) else RetrySettings.Default StorageSettings( apiVersion = apiVersion, authorizationType = authorizationType, azureNameKeyCredential = AzureNameKeyCredential(credentials), - sasToken = sasToken, - retrySettings = RetrySettings(config.getConfig("retry-settings")), - algorithm = algorithm + sasToken = credentials.getOptionalString("sas-token"), + retrySettings = retrySettings, + algorithm = config.getString("signing-algorithm", "HmacSHA256") ) } @@ -208,7 +218,7 @@ final class RetrySettings private (val maxRetries: Int, } object RetrySettings { - val default: RetrySettings = RetrySettings(3, 200.milliseconds, 10.seconds, 0.0) + val Default: RetrySettings = RetrySettings(3, 200.milliseconds, 10.seconds, 0.0) /** Scala API */ def apply( @@ -228,11 +238,15 @@ object RetrySettings { randomFactor ) - def apply(config: Config): RetrySettings = - RetrySettings( - config.getInt("max-retries"), - FiniteDuration(config.getDuration("min-backoff").toNanos, TimeUnit.NANOSECONDS), - FiniteDuration(config.getDuration("max-backoff").toNanos, TimeUnit.NANOSECONDS), - config.getDouble("random-factor") - ) + def apply(config: Config): RetrySettings = { + Try( + RetrySettings( + config.getInt("max-retries"), + FiniteDuration(config.getDuration("min-backoff").toNanos, TimeUnit.NANOSECONDS), + FiniteDuration(config.getDuration("max-backoff").toNanos, TimeUnit.NANOSECONDS), + config.getDouble("random-factor") + ) + ).toOption.getOrElse(Default) + } + } From 07e64a51bd540691fcb10b8814a34be07ccb4837 Mon Sep 17 00:00:00 2001 From: sfali Date: Sun, 25 Aug 2024 11:45:16 -0400 Subject: [PATCH 22/78] StorageSettings tests #3253 --- .../azure/storage/StorageSettingsSpec.scala | 128 ++++++++++++++++++ 1 file changed, 128 insertions(+) create mode 100644 azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala new file mode 100644 index 0000000000..831d14a959 --- /dev/null +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala @@ -0,0 +1,128 @@ +package akka.stream.alpakka +package azure +package storage + +import com.typesafe.config.ConfigFactory +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpec +import scala.concurrent.duration._ + +import scala.util.{Failure, Success, Try} + +class StorageSettingsSpec extends AnyWordSpec with Matchers { + + private def mkSettings(more: String) = + StorageSettings( + ConfigFactory + .parseString(more) + .withFallback(ConfigFactory.parseString(s""" + |api-version = "2024-11-04" + |signing-algorithm = "HmacSHA256" + |credentials { + | authorization-type = anon + | account-name = mystorageaccount + | account-key = bXlhY2NvdW50a2V5 + | sas-token = "" + |} + |retry-settings { + | max-retries = 5 + | min-backoff = 300ms + | max-backoff = 5s + | random-factor = 0.3 + |} + |""".stripMargin)) + .resolve() + ) + + "StorageSettings" should { + "correctly parse config with anonymous authorization type" in { + val settings = mkSettings("") + settings.authorizationType shouldBe "anon" + } + + "correctly parse config with SharedKey authorization type" in { + val settings = mkSettings("credentials.authorization-type = SharedKey") + settings.authorizationType shouldBe "SharedKey" + } + + "correctly parse config with SharedKeyLite authorization type" in { + val settings = mkSettings("credentials.authorization-type = SharedKeyLite") + settings.authorizationType shouldBe "SharedKeyLite" + } + + "correctly parse config with sas authorization type" in { + val settings = mkSettings("credentials.authorization-type = sas") + settings.authorizationType shouldBe "sas" + } + + "fallback to anonymous authorization type when incorrectly configured" in { + val settings = mkSettings("credentials.authorization-type = dummy") + settings.authorizationType shouldBe "anon" + } + + "successfully parse account name and key" in { + val expected = AzureNameKeyCredential("mystorageaccount", "bXlhY2NvdW50a2V5") + val settings = mkSettings("") + val actual = settings.azureNameKeyCredential + actual shouldBe a[AzureNameKeyCredential] + actual.accountName shouldBe expected.accountName + actual.accountKey shouldEqual expected.accountKey + } + + "throw RuntimeException if credentials are missing" in { + Try(StorageSettings(ConfigFactory.parseString(s""" + |api-version = "2024-11-04" + |signing-algorithm = "HmacSHA256" + | + |retry-settings { + | max-retries = 3 + | min-backoff = 200ms + | max-backoff = 10s + | random-factor = 0.0 + |} + |""".stripMargin).resolve())) match { + case Failure(ex) => + ex shouldBe a[RuntimeException] + ex.getMessage shouldBe "credentials must be defined." + case Success(_) => fail("Should have thrown exception") + } + } + + "throw RuntimeException if account name is empty" in { + Try(mkSettings(""" credentials.account-name="" """)) match { + case Failure(ex) => + ex shouldBe a[RuntimeException] + ex.getMessage shouldBe "accountName property must be defined" + case Success(_) => fail("Should have thrown exception") + } + } + + "parse retry settings" in { + val settings = mkSettings("") + settings.retrySettings shouldBe RetrySettings(maxRetries = 5, + minBackoff = 300.milliseconds, + maxBackoff = 5.seconds, + randomFactor = 0.3) + } + + "use default retry settings if config is missing" in { + val settings = StorageSettings(ConfigFactory.parseString(s""" + |api-version = "2024-11-04" + |signing-algorithm = "HmacSHA256" + |credentials { + | authorization-type = anon + | account-name = mystorageaccount + | account-key = bXlhY2NvdW50a2V5 + | sas-token = "" + |} + | + |""".stripMargin).resolve()) + settings.retrySettings shouldBe RetrySettings.Default + } + + "use default retry settings if config is misconfigured" in { + val settings = mkSettings("retry-settings.min-backoff=hello") + settings.retrySettings shouldBe RetrySettings.Default + } + } +} From 44c2f846d22d4b543de9b3abffd3c0e284896d2f Mon Sep 17 00:00:00 2001 From: sfali Date: Sun, 25 Aug 2024 13:02:14 -0400 Subject: [PATCH 23/78] support for create container operation #3253 --- .../storage/impl/AzureStorageStream.scala | 20 ++++++++++++++++++- .../azure/storage/javadsl/BlobService.scala | 11 ++++++++++ .../azure/storage/scaladsl/BlobService.scala | 11 ++++++++++ 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index 775580ffc8..ca0403efb7 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -231,6 +231,24 @@ object AzureStorageStream { .mapMaterializedValue(_ => NotUsed) } + private[storage] def createContainer(objectPath: String): Source[Option[ObjectMetadata], NotUsed] = + Source + .fromMaterializer { (mat, attr) => + implicit val system: ActorSystem = mat.system + val settings = resolveSettings(attr, system) + val request = + createRequest( + method = HttpMethods.PUT, + accountName = settings.azureNameKeyCredential.accountName, + storageType = BlobType, + objectPath = objectPath, + queryString = createQueryString(settings, Some("restype=container")), + headers = populateCommonHeaders(HttpEntity.Empty) + ) + handlePutRequest(request, settings) + } + .mapMaterializedValue(_ => NotUsed) + private def handlePutRequest(request: HttpRequest, settings: StorageSettings)(implicit system: ActorSystem) = { import system.dispatcher signAndRequest(request, settings).flatMapConcat { @@ -330,7 +348,7 @@ object AzureStorageStream { overrideContentLength: Option[Long] = None, range: Option[ByteRange] = None, blobType: Option[String] = None, - leaseId: Option[String], + leaseId: Option[String] = None, writeType: Option[String] = None) = { // Azure required to have these two headers (Content-Length & Content-Type) in the request // in some cases Content-Length header must be set as 0 diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala index 33d600cbdf..720f7a7d5f 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala @@ -100,6 +100,7 @@ object BlobService { .asJava /** + * Put blob. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` * @param contentType content type of the blob @@ -124,4 +125,14 @@ object BlobService { Option(leaseId.orElse(null))) .map(opt => Optional.ofNullable(opt.orNull)) .asJava + + /** + * Create container. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def createContainer(objectPath: String): Source[Optional[ObjectMetadata], NotUsed] = + AzureStorageStream.createContainer(objectPath).map(opt => Optional.ofNullable(opt.orNull)).asJava } diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala index 455b315ace..9a0c77fe79 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala @@ -66,6 +66,7 @@ object BlobService { AzureStorageStream.deleteObject(BlobType, objectPath, versionId, leaseId) /** + * Put blob. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` * @param contentType content type of the blob @@ -82,4 +83,14 @@ object BlobService { blobType: String = "BlockBlob", leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = AzureStorageStream.putBlob(blobType, objectPath, contentType, contentLength, payload, leaseId) + + /** + * Create container. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def createContainer(objectPath: String): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.createContainer(objectPath) } From 93c2aab79c7f1e1595fd3ea932297fcc65bb4d6f Mon Sep 17 00:00:00 2001 From: sfali Date: Sun, 25 Aug 2024 13:14:19 -0400 Subject: [PATCH 24/78] support for endpoint URL for local testing #3253 --- .../src/main/resources/reference.conf | 3 ++ .../alpakka/azure/storage/settings.scala | 30 +++++++++++++++++-- .../azure/storage/StorageSettingsSpec.scala | 6 ++++ 3 files changed, 36 insertions(+), 3 deletions(-) diff --git a/azure-storage/src/main/resources/reference.conf b/azure-storage/src/main/resources/reference.conf index f80b13f535..fd4555bb70 100644 --- a/azure-storage/src/main/resources/reference.conf +++ b/azure-storage/src/main/resources/reference.conf @@ -4,6 +4,9 @@ alpakka { api-version = ${?AZURE_STORAGE_API_VERSION} signing-algorithm = "HmacSHA256" + # for local testing via emulator + # endpoint-url = "" + credentials { # valid values are anon (annonymous), SharedKey, SharedKeyLite, sas authorization-type = anon diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala index 3dec2b33de..686ece1e6c 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala @@ -18,6 +18,7 @@ import scala.util.Try final class StorageSettings(val apiVersion: String, val authorizationType: String, + val endPointUrl: Option[String], val azureNameKeyCredential: AzureNameKeyCredential, val sasToken: Option[String], val retrySettings: RetrySettings, @@ -29,6 +30,9 @@ final class StorageSettings(val apiVersion: String, /** Java API */ def getAuthorizationType: String = authorizationType + /** Java API */ + def getEndPointUrl: Optional[String] = endPointUrl.toJava + /** Java API */ def getAzureNameKeyCredential: AzureNameKeyCredential = azureNameKeyCredential @@ -47,12 +51,15 @@ final class StorageSettings(val apiVersion: String, /** Java API */ def withAuthorizationType(authorizationType: String): StorageSettings = copy(authorizationType = authorizationType) + /** Java API */ + def withSasToken(sasToken: String): StorageSettings = copy(sasToken = emptyStringToOption(sasToken)) + /** Java API */ def withAzureNameKeyCredential(azureNameKeyCredential: AzureNameKeyCredential): StorageSettings = copy(azureNameKeyCredential = azureNameKeyCredential) /** Java API */ - def withSasToken(sasToken: String): StorageSettings = copy(sasToken = emptyStringToOption(sasToken)) + def withEndPointUrl(endPointUrl: String): StorageSettings = copy(endPointUrl = emptyStringToOption(endPointUrl)) /** Java API */ def withRetrySettings(retrySettings: RetrySettings): StorageSettings = copy(retrySettings = retrySettings) @@ -88,12 +95,19 @@ final class StorageSettings(val apiVersion: String, private def copy( apiVersion: String = apiVersion, authorizationType: String = authorizationType, + endPointUrl: Option[String] = endPointUrl, azureNameKeyCredential: AzureNameKeyCredential = azureNameKeyCredential, sasToken: Option[String] = sasToken, retrySettings: RetrySettings = retrySettings, algorithm: String = algorithm ) = - StorageSettings(apiVersion, authorizationType, azureNameKeyCredential, sasToken, retrySettings, algorithm) + StorageSettings(apiVersion, + authorizationType, + endPointUrl, + azureNameKeyCredential, + sasToken, + retrySettings, + algorithm) } object StorageSettings { @@ -104,17 +118,25 @@ object StorageSettings { def apply( apiVersion: String, authorizationType: String, + endPointUrl: Option[String], azureNameKeyCredential: AzureNameKeyCredential, sasToken: Option[String], retrySettings: RetrySettings, algorithm: String ): StorageSettings = - new StorageSettings(apiVersion, authorizationType, azureNameKeyCredential, sasToken, retrySettings, algorithm) + new StorageSettings(apiVersion, + authorizationType, + endPointUrl, + azureNameKeyCredential, + sasToken, + retrySettings, + algorithm) /** Java API */ def create( apiVersion: String, authorizationType: String, + endPointUrl: Optional[String], azureNameKeyCredential: AzureNameKeyCredential, sasToken: Optional[String], retrySettings: RetrySettings, @@ -122,6 +144,7 @@ object StorageSettings { ): StorageSettings = StorageSettings(apiVersion, authorizationType, + Option(endPointUrl.orElse(null)), azureNameKeyCredential, Option(sasToken.orElse(null)), retrySettings, @@ -145,6 +168,7 @@ object StorageSettings { StorageSettings( apiVersion = apiVersion, authorizationType = authorizationType, + endPointUrl = config.getOptionalString("endpoint-url"), azureNameKeyCredential = AzureNameKeyCredential(credentials), sasToken = credentials.getOptionalString("sas-token"), retrySettings = retrySettings, diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala index 831d14a959..f8a4a978ac 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala @@ -124,5 +124,11 @@ class StorageSettingsSpec extends AnyWordSpec with Matchers { val settings = mkSettings("retry-settings.min-backoff=hello") settings.retrySettings shouldBe RetrySettings.Default } + + "populate endpoint URL if provided" in { + val settings = mkSettings("""endpoint-url="http://localhost:1234" """) + settings.endPointUrl shouldBe defined + settings.endPointUrl.get shouldBe "http://localhost:1234" + } } } From 7ac0aa40863216b181f2419572b40a311cb8217d Mon Sep 17 00:00:00 2001 From: sfali Date: Sun, 25 Aug 2024 14:46:50 -0400 Subject: [PATCH 25/78] using end point if defined to create URI #3253 --- .../storage/impl/AzureStorageStream.scala | 92 ++++++++++--------- 1 file changed, 51 insertions(+), 41 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index ca0403efb7..2da022385a 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -56,10 +56,10 @@ object AzureStorageStream { val request = createRequest( method = HttpMethods.GET, - accountName = settings.azureNameKeyCredential.accountName, - storageType = storageType, - objectPath = objectPath, - queryString = createQueryString(settings, versionId.map(value => s"versionId=$value")), + uri = createUri(settings = settings, + storageType = storageType, + objectPath = objectPath, + queryString = createQueryString(settings, versionId.map(value => s"versionId=$value"))), headers = populateCommonHeaders(HttpEntity.Empty, range = range, leaseId = leaseId) ) val objectMetadataMat = Promise[ObjectMetadata]() @@ -93,10 +93,10 @@ object AzureStorageStream { val request = createRequest( method = HttpMethods.HEAD, - accountName = settings.azureNameKeyCredential.accountName, - storageType = storageType, - objectPath = objectPath, - queryString = createQueryString(settings, versionId.map(value => s"versionId=$value")), + uri = createUri(settings = settings, + storageType = storageType, + objectPath = objectPath, + queryString = createQueryString(settings, versionId.map(value => s"versionId=$value"))), headers = populateCommonHeaders(HttpEntity.Empty, leaseId = leaseId) ) @@ -126,10 +126,10 @@ object AzureStorageStream { val request = createRequest( method = HttpMethods.DELETE, - accountName = settings.azureNameKeyCredential.accountName, - storageType = storageType, - objectPath = objectPath, - queryString = createQueryString(settings, versionId.map(value => s"versionId=$value")), + uri = createUri(settings = settings, + storageType = storageType, + objectPath = objectPath, + queryString = createQueryString(settings, versionId.map(value => s"versionId=$value"))), headers = populateCommonHeaders(HttpEntity.Empty, leaseId = leaseId) ) @@ -161,10 +161,10 @@ object AzureStorageStream { val request = createRequest( method = HttpMethods.PUT, - accountName = settings.azureNameKeyCredential.accountName, - storageType = BlobType, - objectPath = objectPath, - queryString = createQueryString(settings), + uri = createUri(settings = settings, + storageType = BlobType, + objectPath = objectPath, + queryString = createQueryString(settings)), headers = populateCommonHeaders(httpEntity, blobType = Some(blobType), leaseId = leaseId) ).withEntity(httpEntity) handlePutRequest(request, settings) @@ -183,10 +183,10 @@ object AzureStorageStream { val request = createRequest( method = HttpMethods.PUT, - accountName = settings.azureNameKeyCredential.accountName, - storageType = FileType, - objectPath = objectPath, - queryString = createQueryString(settings), + uri = createUri(settings = settings, + storageType = FileType, + objectPath = objectPath, + queryString = createQueryString(settings)), headers = Seq( CustomContentTypeHeader(contentType), RawHeader(XMsContentLengthHeaderKey, maxSize.toString), @@ -216,10 +216,10 @@ object AzureStorageStream { val request = createRequest( method = HttpMethods.PUT, - accountName = settings.azureNameKeyCredential.accountName, - storageType = FileType, - objectPath = objectPath, - queryString = createQueryString(settings, Some("comp=range")), + uri = createUri(settings = settings, + storageType = FileType, + objectPath = objectPath, + queryString = createQueryString(settings, Some("comp=range"))), headers = populateCommonHeaders(httpEntity, overrideContentLength, range = Some(range), @@ -239,12 +239,13 @@ object AzureStorageStream { val request = createRequest( method = HttpMethods.PUT, - accountName = settings.azureNameKeyCredential.accountName, - storageType = BlobType, - objectPath = objectPath, - queryString = createQueryString(settings, Some("restype=container")), + uri = createUri(settings = settings, + storageType = BlobType, + objectPath = objectPath, + queryString = createQueryString(settings, Some("restype=container"))), headers = populateCommonHeaders(HttpEntity.Empty) ) + handlePutRequest(request, settings) } .mapMaterializedValue(_ => NotUsed) @@ -370,19 +371,28 @@ object AzureStorageStream { (maybeContentLengthHeader ++ maybeContentTypeHeader ++ maybeRangeHeader ++ maybeBlobTypeHeader ++ maybeLeaseIdHeader ++ maybeWriteTypeHeader).toSeq } - private def createRequest(method: HttpMethod, - accountName: String, - storageType: String, - objectPath: String, - queryString: Option[String], - headers: Seq[HttpHeader]) = - HttpRequest(method = method, uri = createUri(accountName, storageType, objectPath, queryString), headers = headers) - - private def createUri(accountName: String, storageType: String, objectPath: String, queryString: Option[String]) = { - Uri.from(scheme = "https", - host = s"$accountName.$storageType.core.windows.net", - path = Uri.Path(objectPath).toString(), - queryString = queryString) + private def createRequest(method: HttpMethod, uri: Uri, headers: Seq[HttpHeader]) = + HttpRequest(method = method, uri = uri, headers = headers) + + private def createUri(settings: StorageSettings, + storageType: String, + objectPath: String, + queryString: Option[String]) = { + val accountName = settings.azureNameKeyCredential.accountName + val path = if (objectPath.startsWith("/")) objectPath else s"/$objectPath" + settings.endPointUrl + .map { endPointUrl => + val qs = queryString.getOrElse("") + Uri(endPointUrl).withPath(Uri.Path(s"/$accountName$path")).withQuery(Uri.Query(qs)) + } + .getOrElse( + Uri.from( + scheme = "https", + host = s"$accountName.$storageType.core.windows.net", + path = Uri.Path(path).toString(), + queryString = queryString + ) + ) } private def createQueryString(settings: StorageSettings, apiQueryString: Option[String] = None) = { From 9f013cb8f2d35796c80bef4f2af67bac43f0aa86 Mon Sep 17 00:00:00 2001 From: sfali Date: Tue, 27 Aug 2024 09:12:35 -0400 Subject: [PATCH 26/78] move signer out of retry flow, we don't need to sign every time #3253 --- .../alpakka/azure/storage/impl/AzureStorageStream.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index 2da022385a..bff90d4729 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -275,7 +275,6 @@ object AzureStorageStream { import system.dispatcher val retriableFlow = Flow[HttpRequest] - .flatMapConcat(req => Signer(req, settings).signedRequest) .mapAsync(parallelism = 1)( req => singleRequest(req) @@ -289,6 +288,7 @@ object AzureStorageStream { import settings.retrySettings._ Source .single(request) + .flatMapConcat(request => Signer(request, settings).signedRequest) .via(RetryFlow.withBackoff(minBackoff, maxBackoff, randomFactor, maxRetries, retriableFlow) { case (request, Success(response)) if isTransientError(response.status) => response.entity.discardBytes() @@ -296,7 +296,7 @@ object AzureStorageStream { case (request, Failure(_)) => // Treat any exception as transient. Some(request) - case _ => None + case a => None }) .mapAsync(1)(Future.fromTry) } From 9b81f84ff152e925cf456589e6bf396de5beb77b Mon Sep 17 00:00:00 2001 From: sfali Date: Tue, 27 Aug 2024 09:13:20 -0400 Subject: [PATCH 27/78] copy right header #3253 --- .../stream/alpakka/azure/storage/StorageExceptionSpec.scala | 4 ++++ .../stream/alpakka/azure/storage/StorageSettingsSpec.scala | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageExceptionSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageExceptionSpec.scala index 3fc450f384..01c8ce750e 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageExceptionSpec.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageExceptionSpec.scala @@ -1,3 +1,7 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + package akka.stream.alpakka package azure package storage diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala index f8a4a978ac..85af3656cd 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala @@ -1,3 +1,7 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + package akka.stream.alpakka package azure package storage From 3ad11ea3d1a60a84edf26b649bc82258027b1e58 Mon Sep 17 00:00:00 2001 From: sfali Date: Tue, 27 Aug 2024 09:13:53 -0400 Subject: [PATCH 28/78] pass implicit ActorSystem #3253 --- .../main/scala/akka/stream/alpakka/azure/storage/settings.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala index 686ece1e6c..fb602391fd 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala @@ -176,7 +176,7 @@ object StorageSettings { ) } - def apply(system: ClassicActorSystemProvider): StorageSettings = + def apply()(implicit system: ClassicActorSystemProvider): StorageSettings = StorageSettings(system.classicSystem.settings.config.getConfig(ConfigPath)) } From 790fd5a9cda9f1d52d49e3b388140cb833fdd96c Mon Sep 17 00:00:00 2001 From: sfali Date: Tue, 27 Aug 2024 09:14:24 -0400 Subject: [PATCH 29/78] parse content MD5 as well #3253 --- .../stream/alpakka/azure/storage/models.scala | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala index a8528e70f6..7b1ea718c4 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala @@ -44,6 +44,19 @@ final class ObjectMetadata private (val metadata: Seq[HttpHeader]) { lazy val headers: java.util.List[akka.http.javadsl.model.HttpHeader] = (metadata: Seq[akka.http.javadsl.model.HttpHeader]).asJava + /** + * Content MD5 + */ + lazy val contentMd5: Option[String] = metadata.collectFirst { + case e if e.name == "Content-MD5" => removeQuotes(e.value()) + } + + /** + * Java API + * Content MD5 + */ + lazy val getContentMd5: Optional[String] = contentMd5.toJava + /** Gets the hex encoded 128-bit MD5 digest of the associated object according to RFC 1864. This data is used as an * integrity check to verify that the data received by the caller is the same data that was sent by Azure Storage. *

This field represents the hex encoded 128-bit MD5 digest of an object's content as calculated by Azure @@ -162,7 +175,8 @@ final class ObjectMetadata private (val metadata: Seq[HttpHeader]) { override def toString: String = s"""ObjectMetadata( - |eTag=$eTag, + |contentMd5=$contentMd5 + | eTag=$eTag, | contentLength=$contentLength, | contentType=$contentType, | lastModified=$lastModified From d14055816cf9a0179f8b1ee9696d24b83c600674 Mon Sep 17 00:00:00 2001 From: sfali Date: Tue, 27 Aug 2024 09:16:02 -0400 Subject: [PATCH 30/78] populate environment variables to run test #3253 --- build.sbt | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/build.sbt b/build.sbt index 7a2a148491..385113528b 100644 --- a/build.sbt +++ b/build.sbt @@ -10,6 +10,7 @@ lazy val alpakka = project avroparquetTests, awslambda, azureStorageQueue, + azureStorage, cassandra, couchbase, csv, @@ -138,7 +139,18 @@ lazy val azureStorageQueue = alpakkaProject( Scala3.settings ) -lazy val azureStorage = alpakkaProject("azure-storage", "azure.storage", Dependencies.AzureStorage, Scala3.settings) +lazy val azureStorage = alpakkaProject( + "azure-storage", + "azure.storage", + Dependencies.AzureStorage, + Scala3.settings, + Test / fork := true, + Test / envVars := Map( + "AZURE_STORAGE_AUTHORIZATION_TYPE" -> "SharedKey", + "AZURE_STORAGE_ACCOUNT_NAME" -> "", + "AZURE_STORAGE_ACCOUNT_KEY" -> "" + ) +) lazy val cassandra = alpakkaProject("cassandra", "cassandra", Dependencies.Cassandra) From db681c8ac8953bf0ad83f6abe6bc6c53cdf7ecbc Mon Sep 17 00:00:00 2001 From: sfali Date: Tue, 27 Aug 2024 09:16:26 -0400 Subject: [PATCH 31/78] Integration tests #3253 --- .../test/resources/application-azurite.conf | 23 +++ .../src/test/resources/logback-test.xml | 31 ++++ .../azure/storage/AzuriteContainer.scala | 22 +++ .../scaladsl/AzureIntegrationTest.scala | 18 ++ .../scaladsl/AzuriteIntegrationSpec.scala | 38 +++++ .../scaladsl/StorageIntegrationSpec.scala | 158 ++++++++++++++++++ 6 files changed, 290 insertions(+) create mode 100644 azure-storage/src/test/resources/application-azurite.conf create mode 100644 azure-storage/src/test/resources/logback-test.xml create mode 100644 azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/AzuriteContainer.scala create mode 100644 azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala create mode 100644 azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala create mode 100644 azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala diff --git a/azure-storage/src/test/resources/application-azurite.conf b/azure-storage/src/test/resources/application-azurite.conf new file mode 100644 index 0000000000..017bac8b7a --- /dev/null +++ b/azure-storage/src/test/resources/application-azurite.conf @@ -0,0 +1,23 @@ +alpakka { + azure-storage { + api-version = "2024-11-04" + signing-algorithm = "HmacSHA256" + + credentials { + authorization-type = SharedKey + account-name = "devstoreaccount1" + account-key = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" + sas-token = "" + } + } +} + +akka { + http { + client { + parsing { + max-response-reason-length = 512 + } + } + } +} diff --git a/azure-storage/src/test/resources/logback-test.xml b/azure-storage/src/test/resources/logback-test.xml new file mode 100644 index 0000000000..0dd4b9f59c --- /dev/null +++ b/azure-storage/src/test/resources/logback-test.xml @@ -0,0 +1,31 @@ + + + target/s3.log + false + + %d{ISO8601} %-5level [%thread] [%logger{36}] %msg%n + + + + + + %d{HH:mm:ss.SSS} %-5level [%-20.20thread] %-36.36logger{36} %msg%n%rEx + + + + + + + + + + + + + + + + + + + diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/AzuriteContainer.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/AzuriteContainer.scala new file mode 100644 index 0000000000..6f2a9df2c8 --- /dev/null +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/AzuriteContainer.scala @@ -0,0 +1,22 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage + +import com.dimafeng.testcontainers.GenericContainer + +class AzuriteContainer + extends GenericContainer( + "mcr.microsoft.com/azure-storage/azurite:latest", + exposedPorts = Seq(10000, 10001, 10002) + ) { + + def getBlobHostAddress: String = s"http://${container.getContainerIpAddress}:${container.getMappedPort(10000)}" + + def getQueueHostAddress: String = s"http://${container.getContainerIpAddress}:${container.getMappedPort(10001)}" + + def getTableHostAddress: String = s"http://${container.getContainerIpAddress}:${container.getMappedPort(10002)}" +} diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala new file mode 100644 index 0000000000..53fce111d7 --- /dev/null +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala @@ -0,0 +1,18 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package scaladsl + +import akka.actor.ActorSystem +import org.junit.Ignore + +@Ignore +class AzureIntegrationTest extends StorageIntegrationSpec { + + override protected implicit val system: ActorSystem = ActorSystem("AzureIntegrationTest") + +} diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala new file mode 100644 index 0000000000..52a8b38370 --- /dev/null +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala @@ -0,0 +1,38 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package scaladsl + +import akka.actor.ActorSystem +import akka.stream.Attributes +import com.dimafeng.testcontainers.ForAllTestContainer +import com.typesafe.config.ConfigFactory +import org.junit.Ignore + +import scala.concurrent.duration._ +import scala.concurrent.Await + +// TODO: investigate how Azurite works, it is not even working with pure Java API +@Ignore +class AzuriteIntegrationSpec extends StorageIntegrationSpec with ForAllTestContainer { + + override lazy val container: AzuriteContainer = new AzuriteContainer() + + override protected implicit val system: ActorSystem = + ActorSystem("StorageIntegrationSpec", ConfigFactory.load("application-azurite").withFallback(ConfigFactory.load())) + + protected lazy val blobSettings: StorageSettings = StorageSettings().withEndPointUrl(container.getBlobHostAddress) + + override protected def beforeAll(): Unit = { + super.beforeAll() + + val eventualDone = createContainer(defaultContainerName) + Await.result(eventualDone, 10.seconds) + } + + override protected def getDefaultAttributes: Attributes = StorageAttributes.settings(blobSettings) +} diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala new file mode 100644 index 0000000000..8854127daa --- /dev/null +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala @@ -0,0 +1,158 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package scaladsl + +import akka.Done +import akka.actor.ActorSystem +import akka.http.scaladsl.Http +import akka.http.scaladsl.model.ContentTypes +import akka.http.scaladsl.model.headers.ByteRange +import akka.stream.Attributes +import akka.stream.alpakka.testkit.scaladsl.LogCapturing +import akka.stream.scaladsl.{Framing, Keep, Sink, Source} +import akka.testkit.TestKit +import akka.util.ByteString +import org.scalatest.concurrent.ScalaFutures +import org.scalatest.matchers.should.Matchers +import org.scalatest.{BeforeAndAfterAll, OptionValues} +import org.scalatest.wordspec.AnyWordSpecLike + +import java.security.MessageDigest +import java.util.Base64 +import scala.concurrent.duration._ +import scala.concurrent.{ExecutionContext, Future} + +trait StorageIntegrationSpec + extends AnyWordSpecLike + with BeforeAndAfterAll + with Matchers + with ScalaFutures + with OptionValues + with LogCapturing { + + protected val defaultContainerName = "test-container" + protected val fileName = "sample-blob.txt" + protected val sampleText: String = "The quick brown fox jumps over the lazy dog." + System.lineSeparator() + + protected implicit val system: ActorSystem + protected implicit lazy val ec: ExecutionContext = system.dispatcher + override implicit val patienceConfig: PatienceConfig = PatienceConfig(3.minutes, 100.millis) + + override protected def afterAll(): Unit = + Http(system) + .shutdownAllConnectionPools() + .foreach(_ => TestKit.shutdownActorSystem(system)) + + protected def getDefaultAttributes: Attributes = StorageAttributes.settings(StorageSettings()) + + "BlobService" should { + "put blob" in { + val maybeObjectMetadata = + BlobService + .putBlob( + objectPath = s"$defaultContainerName/$fileName", + contentType = ContentTypes.`text/plain(UTF-8)`, + contentLength = sampleText.length, + payload = Source.single(ByteString.fromString(sampleText)) + ) + .withAttributes(getDefaultAttributes) + .toMat(Sink.head)(Keep.right) + .run() + .futureValue + + maybeObjectMetadata shouldBe defined + val objectMetadata = maybeObjectMetadata.get + objectMetadata.contentMd5 shouldBe Some(calculateDigest(sampleText)) + } + + "get blob" in { + val (maybeEventualObjectMetadata, eventualText) = + BlobService + .getBlob(s"$defaultContainerName/$fileName") + .withAttributes(getDefaultAttributes) + .via(Framing.delimiter(ByteString(System.lineSeparator()), 256, allowTruncation = true)) + .map(byteString => byteString.utf8String + System.lineSeparator()) + .toMat(Sink.seq)(Keep.both) + .run() + + val objectMetadata = maybeEventualObjectMetadata.futureValue + objectMetadata.contentMd5 shouldBe Some(calculateDigest(sampleText)) + objectMetadata.contentLength shouldBe sampleText.length + eventualText.futureValue.head shouldBe sampleText + } + + "get blob properties" in { + val maybeObjectMetadata = + BlobService + .getProperties(s"$defaultContainerName/$fileName") + .withAttributes(getDefaultAttributes) + .toMat(Sink.head)(Keep.right) + .run() + .futureValue + + maybeObjectMetadata shouldBe defined + val objectMetadata = maybeObjectMetadata.get + objectMetadata.contentMd5 shouldBe Some(calculateDigest(sampleText)) + objectMetadata.contentLength shouldBe sampleText.length + } + + "get blob range" in { + val range = ByteRange.Slice(0, 8) + val (maybeEventualObjectMetadata, eventualText) = + BlobService + .getBlob(s"$defaultContainerName/$fileName", Some(range)) + .withAttributes(getDefaultAttributes) + .via(Framing.delimiter(ByteString(System.lineSeparator()), 256, allowTruncation = true)) + .map(_.utf8String) + .toMat(Sink.seq)(Keep.both) + .run() + + val objectMetadata = maybeEventualObjectMetadata.futureValue + objectMetadata.contentLength shouldBe (range.last - range.first + 1) + eventualText.futureValue.head shouldBe "The quick" + } + + "delete blob" in { + val maybeObjectMetadata = + BlobService + .deleteBlob(s"$defaultContainerName/$fileName") + .withAttributes(getDefaultAttributes) + .toMat(Sink.head)(Keep.right) + .run() + .futureValue + + maybeObjectMetadata.get.contentLength shouldBe 0 + } + + "get blob after delete" in { + val maybeObjectMetadata = + BlobService + .getProperties(s"$defaultContainerName/$fileName") + .withAttributes(getDefaultAttributes) + .toMat(Sink.head)(Keep.right) + .run() + .futureValue + + maybeObjectMetadata shouldBe empty + } + } + + protected def createContainer(containerName: String): Future[Done] = { + BlobService + .createContainer(containerName) + .withAttributes(getDefaultAttributes) + .runWith(Sink.ignore) + } + + private def calculateDigest(text: String) = { + val digest = MessageDigest.getInstance("MD5") + digest.update(text.getBytes) + val bytes = digest.digest() + Base64.getEncoder.encodeToString(bytes) + } +} From a46d27cdda1b8071270b9689a63128f186e1c127 Mon Sep 17 00:00:00 2001 From: sfali Date: Tue, 27 Aug 2024 12:58:01 -0400 Subject: [PATCH 32/78] Added documentation for storage connector #3253 --- .../src/main/resources/reference.conf | 2 + .../scala/docs/scaladsl/StorageTest.scala | 256 ++++++++++++++++++ docs/src/main/paradox/azure-storage.md | 177 ++++++++++++ docs/src/main/paradox/index.md | 1 + 4 files changed, 436 insertions(+) create mode 100644 azure-storage/src/test/scala/docs/scaladsl/StorageTest.scala create mode 100644 docs/src/main/paradox/azure-storage.md diff --git a/azure-storage/src/main/resources/reference.conf b/azure-storage/src/main/resources/reference.conf index fd4555bb70..32d29afe59 100644 --- a/azure-storage/src/main/resources/reference.conf +++ b/azure-storage/src/main/resources/reference.conf @@ -7,6 +7,7 @@ alpakka { # for local testing via emulator # endpoint-url = "" + #azure-credentials credentials { # valid values are anon (annonymous), SharedKey, SharedKeyLite, sas authorization-type = anon @@ -24,6 +25,7 @@ alpakka { sas-token = "" sas-token = ${?AZURE_STORAGE_SAS_TOKEN} } + #azure-credentials # Default settings corresponding to automatic retry of requests in an S3 stream. retry-settings { diff --git a/azure-storage/src/test/scala/docs/scaladsl/StorageTest.scala b/azure-storage/src/test/scala/docs/scaladsl/StorageTest.scala new file mode 100644 index 0000000000..e8809c5fb2 --- /dev/null +++ b/azure-storage/src/test/scala/docs/scaladsl/StorageTest.scala @@ -0,0 +1,256 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package docs.scaladsl + +import akka.NotUsed +import akka.actor.ActorSystem +import akka.http.scaladsl.model.ContentTypes +import akka.http.scaladsl.model.headers.ByteRange +import akka.stream.alpakka.azure.storage.scaladsl.StorageIntegrationSpec +import akka.stream.scaladsl.{Keep, Sink, Source} +import akka.util.ByteString + +import scala.concurrent.Future + +class StorageTest extends StorageIntegrationSpec { + + override protected implicit val system: ActorSystem = ActorSystem("StorageSystem") + + "AzureStorage Blob connector" should { + + "create container" in { + + //#create-container + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = BlobService.createContainer("my-container") + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() + //#create-container + + val metaData = eventualMaybeMetadata.futureValue + + // TODO: validate + println(metaData) + } + + "put blob" in { + + //#put-blob + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val sampleText = "some random text" + val contentLength = sampleText.length + val payload = Source.single(ByteString.fromString(sampleText)) + val source: Source[Option[ObjectMetadata], NotUsed] = + BlobService.putBlob( + objectPath = "my-container/my-blob.txt", + contentType = ContentTypes.`text/plain(UTF-8)`, + contentLength = contentLength, + payload = payload + ) + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() + //#put-blob + + val metaData = eventualMaybeMetadata.futureValue + + // TODO: validate + println(metaData) + } + + "get blob" in { + + //#get-blob + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[ByteString, Future[ObjectMetadata]] = + BlobService.getBlob(objectPath = "my-container/my-blob.txt") + + val eventualText: Future[ByteString] = source.toMat(Sink.head)(Keep.right).run() + //#get-blob + + val text = eventualText.futureValue + + // TODO: validate + println(text) + } + + "get blob range" in { + + //#get-blob-range + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[ByteString, Future[ObjectMetadata]] = + BlobService.getBlob(objectPath = "my-container/my-blob.txt", range = Some(ByteRange.Slice(2, 5))) + + val eventualText: Future[ByteString] = source.toMat(Sink.head)(Keep.right).run() + //#get-blob-range + + val text = eventualText.futureValue + + // TODO: validate + println(text) + } + + "get blob properties" in { + + //#get-blob-properties + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + BlobService.getProperties(objectPath = "my-container/my-blob.txt") + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() + //#get-blob-properties + + val maybeMetadata = eventualMaybeMetadata.futureValue + + // TODO: validate + println(maybeMetadata) + } + + "delete blob" in { + + //#delete-blob + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + BlobService.deleteBlob(objectPath = "my-container/my-blob.txt") + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() + //#delete-blob + + val maybeMetadata = eventualMaybeMetadata.futureValue + + // TODO: validate + println(maybeMetadata) + } + } + + "AzureStorage File connector" should { + + "create file" in { + + //#create-file + import akka.stream.alpakka.azure.storage.scaladsl.FileService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + FileService.createFile(objectPath = "/my-directory/my-file.txt", + contentType = ContentTypes.`text/plain(UTF-8)`, + maxSize = 44) + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() + //#create-file + + val metaData = eventualMaybeMetadata.futureValue + + // TODO: validate + println(metaData) + } + + "update range" in { + + //#update-range + import akka.stream.alpakka.azure.storage.scaladsl.FileService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val sampleText = "The quick brown fox jumps over the lazy dog." + val payload = Source.single(ByteString.fromString(sampleText)) + val source: Source[Option[ObjectMetadata], NotUsed] = + FileService.updateRange(objectPath = "/my-directory/my-file.txt", + contentType = ContentTypes.`text/plain(UTF-8)`, + range = ByteRange.Slice(0, sampleText.length - 1), + payload = payload) + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() + //#update-range + + val metaData = eventualMaybeMetadata.futureValue + + // TODO: validate + println(metaData) + } + + "get file" in { + + //#get-file + import akka.stream.alpakka.azure.storage.scaladsl.FileService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[ByteString, Future[ObjectMetadata]] = + FileService.getFile(objectPath = "/my-directory/my-file.txt") + + val eventualText: Future[ByteString] = source.toMat(Sink.head)(Keep.right).run() + //#get-file + + val text = eventualText.futureValue + + // TODO: validate + println(text) + } + + "get file properties" in { + + //#get-file-properties + import akka.stream.alpakka.azure.storage.scaladsl.FileService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + FileService.getProperties(objectPath = "/my-directory/my-file.txt") + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() + //#get-file-properties + + val metaData = eventualMaybeMetadata.futureValue + + // TODO: validate + println(metaData) + } + + "clear range" in { + + //#clear-range + import akka.stream.alpakka.azure.storage.scaladsl.FileService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + FileService.clearRange(objectPath = "/my-directory/my-file.txt", range = ByteRange.Slice(5, 11)) + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() + //#clear-range + + val metaData = eventualMaybeMetadata.futureValue + + // TODO: validate + println(metaData) + } + + "delete file" in { + + //#delete-file + import akka.stream.alpakka.azure.storage.scaladsl.FileService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + FileService.deleteFile(objectPath = "/my-directory/my-file.txt") + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() + //#delete-file + + val metaData = eventualMaybeMetadata.futureValue + + // TODO: validate + println(metaData) + } + } +} diff --git a/docs/src/main/paradox/azure-storage.md b/docs/src/main/paradox/azure-storage.md new file mode 100644 index 0000000000..21b0c2cadf --- /dev/null +++ b/docs/src/main/paradox/azure-storage.md @@ -0,0 +1,177 @@ +# Azure Storage + +Azure Storage connector provides Akka Stream Source for Azure Storage. Currently only supports `Blob` and `File` services. For detail about these services please read [Azure docs](https://learn.microsoft.com/en-us/rest/api/storageservices/). + +@@project-info{ projectId="azure-storage" } + +## Artifacts + +The Akka dependencies are available from Akka's library repository. To access them there, you need to configure the URL for this repository. + +@@repository [sbt,Maven,Gradle] { +id="akka-repository" +name="Akka library repository" +url="https://repo.akka.io/maven" +} + +Additionally, add the dependencies as below. + +@@dependency [sbt,Maven,Gradle] { +group=com.lightbend.akka +artifact=akka-stream-alpakka-s3_$scala.binary.version$ +version=$project.version$ +symbol2=AkkaVersion +value2=$akka.version$ +group2=com.typesafe.akka +artifact2=akka-stream_$scala.binary.version$ +version2=AkkaVersion +symbol3=AkkaHttpVersion +value3=$akka-http.version$ +group3=com.typesafe.akka +artifact3=akka-http_$scala.binary.version$ +version3=AkkaHttpVersion +group4=com.typesafe.akka +artifact4=akka-http-xml_$scala.binary.version$ +version4=AkkaHttpVersion +} + +The table below shows direct dependencies of this module and the second tab shows all libraries it depends on transitively. + +@@dependencies { projectId="azure-storage" } + +## Configuration + +The settings for the Azure Storage connector are read by default from `alpakka.azure-storage` configuration section. Credentials are defined in `credentials` section of [`reference.conf`](/azure-storage/src/main/resources/reference.conf). + +Scala +: @@snip [snip](/azure-storage/src/main/resources/reference.conf) { #azure-credentials } + +Java +: @@snip [snip](/azure-storage/src/main/resources/reference.conf) { #azure-credentials } + +At minimum following configurations needs to be set: + +* `authorization-type`, this is the type of authorization to use as described [here](https://learn.microsoft.com/en-us/rest/api/storageservices/authorize-requests-to-azure-storage), possible values are `anon`, `SharedKey`, `SharedKeyLite`, or `sas`. Environment variable `AZURE_STORAGE_AUTHORIZATION_TYPE` can be set to override this configuration. +* `account-name`, this is the name of the blob storage or file share. Environment variable `AZURE_STORAGE_ACCOUNT_NAME` can be set to override this configuration. +* `account-key`, Account key to use to create authorization signature, mandatory for `SharedKey` or `SharedKeyLite` authorization types, as described [here](https://learn.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key). Environment variable `AZURE_STORAGE_ACCOUNT_KEY` can be set to override this configuration. +* `sas-token` if authorization type is `sas`. Environment variable `AZURE_STORAGE_SAS_TOKEN` can be set to override this configuration. + +## Supported operations on Blob service + +### Create Container + +The [`Create Container`](https://learn.microsoft.com/en-us/rest/api/storageservices/create-container) operation creates a new container under the specified account. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #create-container } + +Java +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #create-container } + +### Put Blob + +The [`Put Blob`](https://learn.microsoft.com/en-us/rest/api/storageservices/put-blob) operation creates a new block, page, or append blob, or updates the content of an existing block blob. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #put-blob } + +Java +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #put-blob } + +### Get Blob + +The [`Get Blob`](https://learn.microsoft.com/en-us/rest/api/storageservices/get-blob) operation reads or downloads a blob from the system, including its metadata and properties. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob } + +Java +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob } + +In order to download a range of a file's data you can use overloaded method which additionally takes `ByteRange` as argument. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob-range } + +Java +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob-range } + +### Get blob properties without downloading blob + +The [`Get Blob Properties`](https://learn.microsoft.com/en-us/rest/api/storageservices/get-blob-properties) operation returns all user-defined metadata, standard HTTP properties, and system properties for the blob. (**Note:** Current implementation does not return user-defined metatdata.) + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob-properties } + +Java +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob-properties } + +### Delete Blob + +The [`Delete Blob`](https://learn.microsoft.com/en-us/rest/api/storageservices/delete-blob) operation deletes the specified blob. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #delete-blob } + +Java +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #delte-blob } + +## Supported operations on File service + +### Create File + +The [`Create File`](https://learn.microsoft.com/en-us/rest/api/storageservices/create-file) operation creates a new file or replaces a file. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #create-file } + +Java +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #create-file } + +### Update Range + +The [`Update Range`](https://learn.microsoft.com/en-us/rest/api/storageservices/put-range) operation writes a range of bytes to a file. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #update-range } + +Java +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #update-range } + +Range can be cleared using `ClearRange` function. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #clear-range } + +Java +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #clear-range } + +### Get File + +The [`Get File`](https://learn.microsoft.com/en-us/rest/api/storageservices/get-file) operation reads or downloads a file from the system, including its metadata and properties. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-file } + +Java +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-file } + +### Get file properties without downloading blob + +The [`Get File Properties`](https://learn.microsoft.com/en-us/rest/api/storageservices/get-file-properties) operation returns all user-defined metadata, standard HTTP properties, and system properties for the file. (**Note:** Current implementation does not return user-defined metatdata.) + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-file-properties } + +Java +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-file-properties } + +### Delete Blob + +The [`Delete File`](https://learn.microsoft.com/en-us/rest/api/storageservices/delete-file) operation immediately removes the file from the storage account. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #delete-file } + +Java +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #delte-file } diff --git a/docs/src/main/paradox/index.md b/docs/src/main/paradox/index.md index 16ef1f709b..7a353f3a3d 100644 --- a/docs/src/main/paradox/index.md +++ b/docs/src/main/paradox/index.md @@ -23,6 +23,7 @@ The [Alpakka project](https://doc.akka.io/docs/alpakka/current/) is an initiativ * [AWS S3](s3.md) * [AWS SNS](sns.md) * [AWS SQS](sqs.md) +* [Azure Storage](azure-storage.md) * [Azure Storage Queue](azure-storage-queue.md) * [Couchbase](couchbase.md) * [Elasticsearch](elasticsearch.md) From a9f31eabe6ce7f23fc3270d3d5035b01b4f27ec6 Mon Sep 17 00:00:00 2001 From: sfali Date: Tue, 27 Aug 2024 13:00:51 -0400 Subject: [PATCH 33/78] setting mimaPreviousArtifacts to make build work #3253 --- build.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.sbt b/build.sbt index 385113528b..be4bf56763 100644 --- a/build.sbt +++ b/build.sbt @@ -150,7 +150,7 @@ lazy val azureStorage = alpakkaProject( "AZURE_STORAGE_ACCOUNT_NAME" -> "", "AZURE_STORAGE_ACCOUNT_KEY" -> "" ) -) +).settings(mimaPreviousArtifacts := Set.empty) // FIXME remove after first release lazy val cassandra = alpakkaProject("cassandra", "cassandra", Dependencies.Cassandra) From 9c1dd26a888ba63c06d2a0d4b93901c271315f8d Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 12:13:54 -0400 Subject: [PATCH 34/78] Added missing endPointUrl #3253 --- .../main/scala/akka/stream/alpakka/azure/storage/settings.scala | 2 ++ 1 file changed, 2 insertions(+) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala index fb602391fd..3e3fef9247 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala @@ -71,6 +71,7 @@ final class StorageSettings(val apiVersion: String, s"""StorageSettings( | apiVersion=$apiVersion, | authorizationType=$authorizationType, + | endPointUrl=$endPointUrl, | azureNameKeyCredential=$azureNameKeyCredential, | sasToken=$sasToken | retrySettings=$retrySettings, @@ -81,6 +82,7 @@ final class StorageSettings(val apiVersion: String, case that: StorageSettings => apiVersion == that.apiVersion && authorizationType == that.authorizationType && + endPointUrl == that.endPointUrl && Objects.equals(azureNameKeyCredential, that.azureNameKeyCredential) && sasToken == that.sasToken && Objects.equals(retrySettings, that.retrySettings) && From 557694056996772a6d2c84b98e1c2c79285b8dd4 Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 12:15:12 -0400 Subject: [PATCH 35/78] minor return types fix #3253 --- .../alpakka/azure/storage/impl/AzureStorageStream.scala | 4 ++-- .../stream/alpakka/azure/storage/javadsl/FileService.scala | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index bff90d4729..8656934980 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -202,7 +202,7 @@ object AzureStorageStream { contentType: ContentType, range: ByteRange.Slice, payload: Option[Source[ByteString, _]], - leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed.type] = { + leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed] = { Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system @@ -296,7 +296,7 @@ object AzureStorageStream { case (request, Failure(_)) => // Treat any exception as transient. Some(request) - case a => None + case _ => None }) .mapAsync(1)(Future.fromTry) } diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala index 53a87f6259..fe6c22898c 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala @@ -135,7 +135,7 @@ object FileService { contentType: ContentType, range: Slice, payload: Source[ByteString, _], - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed.type] = + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream .updateOrClearRange(objectPath, contentType.asInstanceOf[ScalaContentType], @@ -156,7 +156,7 @@ object FileService { */ def clearRange(objectPath: String, range: Slice, - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed.type] = + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream .updateOrClearRange(objectPath, ContentTypes.NO_CONTENT_TYPE.asInstanceOf[ScalaContentType], From b281abb9d4be05df742628809f3b2b745e0c3f9f Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 12:16:06 -0400 Subject: [PATCH 36/78] base class for wire mock #3253 --- .../scaladsl/StorageWireMockBase.scala | 199 ++++++++++++++++++ 1 file changed, 199 insertions(+) create mode 100644 azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala new file mode 100644 index 0000000000..5f2212d092 --- /dev/null +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala @@ -0,0 +1,199 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package scaladsl + +import akka.actor.ActorSystem +import akka.http.scaladsl.model.headers._ +import StorageWireMockBase.{config, getCallerName, initServer, AccountName} +import akka.http.scaladsl.model.ContentTypes +import akka.testkit.TestKit +import com.github.tomakehurst.wiremock.WireMockServer +import com.github.tomakehurst.wiremock.client.WireMock +import com.github.tomakehurst.wiremock.client.WireMock._ +import com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig +import com.github.tomakehurst.wiremock.http.trafficlistener.ConsoleNotifyingWiremockNetworkTrafficListener +import com.github.tomakehurst.wiremock.matching.StringValuePattern +import com.github.tomakehurst.wiremock.stubbing.StubMapping +import com.typesafe.config.{Config, ConfigFactory} + +abstract class StorageWireMockBase(_system: ActorSystem, val _wireMockServer: WireMockServer) extends TestKit(_system) { + + import StorageWireMockBase._ + + private val port = _wireMockServer.port() + protected val mock = new WireMock("localhost", port) + + // test data + protected val containerName = "my-container" + protected val blobName = "my-blob.txt" + protected val payload = "The quick brown fox jumps over the lazy dog." + protected val contentLength: Long = payload.length.toLong + protected val contentRange: ByteRange.Slice = ByteRange(0, contentLength - 1) + protected val subRange: ByteRange.Slice = ByteRange(4, 8) + + private def this(mock: WireMockServer) = + this( + ActorSystem(getCallerName(StorageWireMockBase.getClass), config(mock.port()).withFallback(ConfigFactory.load())), + mock + ) + + def this() = { + this(initServer()) + system.registerOnTermination(stopWireMockServer()) + } + + protected def mockCreateContainer(): StubMapping = + mock.register( + put(urlEqualTo(s"/$AccountName/$containerName?restype=container")) + .willReturn( + aResponse() + .withStatus(201) + .withHeader(ETag.name, ETagValue) + .withHeader(`Content-Length`.name, "0") + ) + ) + + protected def mockPutBlob(): StubMapping = + mock.register( + put(urlEqualTo(s"/$AccountName/$containerName/$blobName")) + .withRequestBody(equalTo(payload)) + .willReturn( + aResponse() + .withStatus(201) + .withHeader(ETag.name, ETagValue) + .withHeader(`Content-Length`.name, payload.length.toString) + .withHeader(`Content-Type`.name, "text/plain; charset=UTF-8") + ) + ) + + protected def mockGetBlob(): StubMapping = + mock.register( + get(urlEqualTo(s"/$AccountName/$containerName/$blobName")) + .willReturn( + aResponse() + .withStatus(200) + .withHeader(ETag.name, ETagValue) + .withBody(payload) + ) + ) + + protected def mockGetBlobWithRange(): StubMapping = + mock.register( + get(urlEqualTo(s"/$AccountName/$containerName/$blobName")) + .withHeader(Range.name, equalTo(s"bytes=${subRange.first}-${subRange.last}")) + .willReturn( + aResponse() + .withStatus(200) + .withHeader(ETag.name, ETagValue) + .withBody(payload.substring(subRange.first.toInt, subRange.last.toInt + 1)) + ) + ) + + protected def mockGetBlobProperties(): StubMapping = + mock.register( + head(urlEqualTo(s"/$AccountName/$containerName/$blobName")) + .willReturn( + aResponse() + .withStatus(200) + .withHeader(ETag.name, ETagValue) + .withHeader(`Content-Length`.name, payload.length.toString) + .withHeader(`Content-Type`.name, "text/plain; charset=UTF-8") + ) + ) + + protected def mockDeleteBlob(): StubMapping = + mock.register( + delete(urlEqualTo(s"/$AccountName/$containerName/$blobName")) + .willReturn( + aResponse() + .withStatus(202) + .withHeader(ETag.name, ETagValue) + ) + ) + + protected def mockCreateFile(): StubMapping = + mock.register( + put(urlEqualTo(s"/$AccountName/$containerName/$blobName")) + .withHeader(XMsContentLengthHeaderKey, equalTo(contentLength.toString)) + .withHeader(FileTypeHeaderKey, equalTo("file")) + .willReturn( + aResponse() + .withStatus(201) + .withHeader(ETag.name, ETagValue) + .withHeader(`Content-Length`.name, "0") + ) + ) + + protected def mockUpdateRange(): StubMapping = + mock.register( + put(urlEqualTo(s"/$AccountName/$containerName/$blobName?comp=range")) + .withHeader(Range.name, equalTo(s"bytes=0-${contentLength - 1}")) + .withHeader(FileWriteTypeHeaderKey, equalTo("update")) + .withRequestBody(equalTo(payload)) + .willReturn( + aResponse() + .withStatus(201) + .withHeader(ETag.name, ETagValue) + .withHeader(`Content-Length`.name, "0") + ) + ) + + protected def mockClearRange(): StubMapping = + mock.register( + put(urlEqualTo(s"/$AccountName/$containerName/$blobName?comp=range")) + .withHeader(`Content-Type`.name, equalTo(ContentTypes.NoContentType.toString())) + .withHeader(Range.name, equalTo(s"bytes=${subRange.first}-${subRange.last}")) + .withHeader(FileWriteTypeHeaderKey, equalTo("clear")) + .willReturn( + aResponse() + .withStatus(201) + .withHeader(ETag.name, ETagValue) + .withHeader(`Content-Length`.name, "0") + ) + ) + + private def stopWireMockServer(): Unit = _wireMockServer.stop() +} + +object StorageWireMockBase { + + val AccountName = "teststoreaccount" + val ETagRawValue = "fba9dede5f27731c9771645a39863328" + val ETagValue = s""""$ETagRawValue"""" + + def initServer(): WireMockServer = { + val server = new WireMockServer( + wireMockConfig() + .dynamicPort() + ) + server.start() + server + } + + def getCallerName(clazz: Class[_]): String = { + val s = (Thread.currentThread.getStackTrace map (_.getClassName) drop 1) + .dropWhile(_ matches "(java.lang.Thread|.*WireMockBase.?$)") + val reduced = s.lastIndexWhere(_ == clazz.getName) match { + case -1 => s + case z => s drop (z + 1) + } + reduced.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_") + } + + def config(proxyPort: Int): Config = ConfigFactory.parseString(s""" + |akka.http.client.log-unencrypted-network-bytes = 1000 + |akka.http.parsing.max-to-strict-bytes=infinite + |${StorageSettings.ConfigPath} { + | endpoint-url = "http://localhost:$proxyPort" + | credentials { + | authorization-type = anon + | account-name = $AccountName + | } + |} + |""".stripMargin) +} From e0afb817b605944fba241cdaecf1a76ebd46246e Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 12:17:07 -0400 Subject: [PATCH 37/78] Scala and Java mock tests for documentation #3253 --- .../test/java/docs/javadsl/StorageTest.java | 283 +++++++++++++++++ .../scala/docs/scaladsl/StorageSpec.scala | 291 ++++++++++++++++++ .../scala/docs/scaladsl/StorageTest.scala | 256 --------------- 3 files changed, 574 insertions(+), 256 deletions(-) create mode 100644 azure-storage/src/test/java/docs/javadsl/StorageTest.java create mode 100644 azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala delete mode 100644 azure-storage/src/test/scala/docs/scaladsl/StorageTest.scala diff --git a/azure-storage/src/test/java/docs/javadsl/StorageTest.java b/azure-storage/src/test/java/docs/javadsl/StorageTest.java new file mode 100644 index 0000000000..50e056a17f --- /dev/null +++ b/azure-storage/src/test/java/docs/javadsl/StorageTest.java @@ -0,0 +1,283 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package docs.javadsl; + +import akka.NotUsed; +import akka.actor.ActorSystem; +import akka.http.javadsl.Http; +import akka.http.javadsl.model.ContentTypes; +import akka.stream.alpakka.azure.storage.ObjectMetadata; +import akka.stream.alpakka.azure.storage.javadsl.BlobService; +import akka.stream.alpakka.azure.storage.javadsl.FileService; +import akka.stream.alpakka.azure.storage.scaladsl.StorageWireMockBase; +import akka.stream.alpakka.testkit.javadsl.LogCapturingJunit4; +import akka.stream.javadsl.Source; +import akka.stream.javadsl.Sink; +import akka.testkit.javadsl.TestKit; +import akka.util.ByteString; +import com.github.tomakehurst.wiremock.WireMockServer; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Rule; +import org.junit.Test; + +import java.util.List; +import java.util.Optional; +import java.util.concurrent.CompletionStage; +import java.util.stream.Collectors; + +public class StorageTest extends StorageWireMockBase { + + @Rule + public final LogCapturingJunit4 logCapturing = new LogCapturingJunit4(); + + private static ActorSystem system; + private static WireMockServer wireMockServerForShutdown; + + @Before + public void before() { + wireMockServerForShutdown = _wireMockServer(); + system = system(); + } + + @AfterClass + public static void afterAll() { + wireMockServerForShutdown.stop(); + Http.get(system) + .shutdownAllConnectionPools() + .thenRun(() -> TestKit.shutdownActorSystem(system)); + } + + + @Test + public void createContainer() throws Exception { + mockCreateContainer(); + + //#create-container + final Source, NotUsed> source = BlobService.createContainer(containerName()); + + final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); + //#create-container + + final var optionalObjectMetadata = optionalCompletionStage.toCompletableFuture().get(); + Assert.assertTrue(optionalObjectMetadata.isPresent()); + final var objectMetadata = optionalObjectMetadata.get(); + Assert.assertEquals(objectMetadata.getContentLength(), 0L); + Assert.assertEquals(objectMetadata.getETag().get(), ETagRawValue()); + } + + + // TODO: There are couple of issues, firstly there are two `Content-Length` headers being added, one by `putBlob` + // function and secondly by, most likely, by WireMock. Need to to figure out how to tell WireMock not to add `Content-Length` + // header, secondly once that resolve then we get `akka.http.scaladsl.model.EntityStreamException`. + @Ignore("Fix this test case") + @Test + public void putBlob() throws Exception { + mockPutBlob(); + + //#put-blob + final Source, NotUsed> source = + BlobService.putBlob(containerName() + "/" + blobName(), + ContentTypes.TEXT_PLAIN_UTF8, + contentLength(), + Source.single(ByteString.fromString(payload())), + "BlockBlob", Optional.empty()); + + final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); + //#put-blob + + final var optionalObjectMetadata = optionalCompletionStage.toCompletableFuture().get(); + Assert.assertTrue(optionalObjectMetadata.isPresent()); + } + + @Test + public void getBlob() throws Exception { + mockGetBlob(); + + //#get-blob + final Source> source = + BlobService.getBlob(containerName() + "/" + blobName(), Optional.empty(), Optional.empty()); + + final CompletionStage> eventualPayload = source.runWith(Sink.seq(), system); + //#get-blob + + final var actualPayload = eventualPayload.toCompletableFuture().get().stream() + .map(ByteString::utf8String).collect(Collectors.joining()); + Assert.assertEquals(actualPayload, payload()); + } + + @Test + public void getBlobRange() throws Exception { + mockGetBlobWithRange(); + + //#get-blob-range + final Source> source = + BlobService.getBlob(containerName() + "/" + blobName(), subRange(), + Optional.empty(), Optional.empty()); + + final CompletionStage> eventualPayload = source.runWith(Sink.seq(), system); + //#get-blob-range + + final var actualPayload = eventualPayload.toCompletableFuture().get().stream() + .map(ByteString::utf8String).collect(Collectors.joining()); + Assert.assertEquals("quick", actualPayload); + } + + @Test + public void getBlobProperties() throws Exception { + mockGetBlobProperties(); + + //#get-blob-properties + final Source, NotUsed> source = + BlobService.getProperties(containerName() + "/" + blobName(), Optional.empty(), Optional.empty()); + + final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); + //#get-blob-properties + + final var maybeObjectMetadata = optionalCompletionStage.toCompletableFuture().get(); + Assert.assertTrue(maybeObjectMetadata.isPresent()); + final var objectMetadata = maybeObjectMetadata.get(); + Assert.assertEquals(Optional.of(ETagRawValue()), objectMetadata.getETag()); + Assert.assertEquals(contentLength(), objectMetadata.getContentLength()); + Assert.assertEquals(Optional.of(ContentTypes.TEXT_PLAIN_UTF8.toString()), objectMetadata.getContentType()); + } + + @Test + public void deleteBlob() throws Exception { + mockDeleteBlob(); + + //#delete-blob + final Source, NotUsed> source = + BlobService.deleteBlob(containerName() + "/" + blobName(), Optional.empty(), Optional.empty()); + + final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); + //#delete-blob + + final var maybeObjectMetadata = optionalCompletionStage.toCompletableFuture().get(); + Assert.assertTrue(maybeObjectMetadata.isPresent()); + final var objectMetadata = maybeObjectMetadata.get(); + Assert.assertEquals(Optional.of(ETagRawValue()), objectMetadata.getETag()); + Assert.assertEquals(0L, objectMetadata.getContentLength()); + } + + @Test + public void createFile() throws Exception { + mockCreateFile(); + + //#create-file + final Source, NotUsed> source = + FileService.createFile(containerName() + "/" + blobName(), ContentTypes.TEXT_PLAIN_UTF8, contentLength(), Optional.empty()); + + final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); + //#create-file + + final var maybeObjectMetadata = optionalCompletionStage.toCompletableFuture().get(); + Assert.assertTrue(maybeObjectMetadata.isPresent()); + final var objectMetadata = maybeObjectMetadata.get(); + Assert.assertEquals(Optional.of(ETagRawValue()), objectMetadata.getETag()); + Assert.assertEquals(0L, objectMetadata.getContentLength()); + } + + // TODO: There are couple of issues, firstly there are two `Content-Length` headers being added, one by `putBlob` + // function and secondly by, most likely, by WireMock. Need to to figure out how to tell WireMock not to add `Content-Length` + // header, secondly once that resolve then we get `akka.http.scaladsl.model.EntityStreamException`. + @Ignore("Fix this test case") + @Test + public void updateRange() throws Exception { + mockCreateFile(); + + //#update-range + final Source, NotUsed> source = + FileService.updateRange(containerName() + "/" + blobName(), + ContentTypes.TEXT_PLAIN_UTF8, contentRange(), Source.single(ByteString.fromString(payload())), + Optional.empty()); + + final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); + //#update-range + + final var maybeObjectMetadata = optionalCompletionStage.toCompletableFuture().get(); + Assert.assertTrue(maybeObjectMetadata.isPresent()); + final var objectMetadata = maybeObjectMetadata.get(); + Assert.assertEquals(Optional.of(ETagRawValue()), objectMetadata.getETag()); + Assert.assertEquals(0L, objectMetadata.getContentLength()); + } + + @Test + public void getFile() throws Exception { + mockGetBlob(); + + //#get-file + final Source> source = + FileService.getFile(containerName() + "/" + blobName(), Optional.empty(), Optional.empty()); + + final CompletionStage> eventualPayload = source.runWith(Sink.seq(), system); + //#get-file + + final var actualPayload = eventualPayload.toCompletableFuture().get().stream() + .map(ByteString::utf8String).collect(Collectors.joining()); + Assert.assertEquals(actualPayload, payload()); + } + + @Test + public void getFileProperties() throws Exception { + mockGetBlobProperties(); + + //#get-file-properties + final Source, NotUsed> source = + FileService.getProperties(containerName() + "/" + blobName(), Optional.empty(), Optional.empty()); + + final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); + //#get-file-properties + + final var maybeObjectMetadata = optionalCompletionStage.toCompletableFuture().get(); + Assert.assertTrue(maybeObjectMetadata.isPresent()); + final var objectMetadata = maybeObjectMetadata.get(); + Assert.assertEquals(Optional.of(ETagRawValue()), objectMetadata.getETag()); + Assert.assertEquals(contentLength(), objectMetadata.getContentLength()); + Assert.assertEquals(Optional.of(ContentTypes.TEXT_PLAIN_UTF8.toString()), objectMetadata.getContentType()); + } + + // TODO: There are couple of issues, firstly there are two `Content-Length` headers being added, one by `putBlob` + // function and secondly by, most likely, by WireMock. Need to to figure out how to tell WireMock not to add `Content-Length` + // header, secondly once that resolve then we get `akka.http.scaladsl.model.EntityStreamException`. + @Ignore("Fix this test case") + @Test + public void clearRange() throws Exception { + mockClearRange(); + + //#clear-range + final Source, NotUsed> source = + FileService.clearRange(containerName() + "/" + blobName(), subRange(), Optional.empty()); + + final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); + //#clear-range + + final var maybeObjectMetadata = optionalCompletionStage.toCompletableFuture().get(); + Assert.assertTrue(maybeObjectMetadata.isPresent()); + final var objectMetadata = maybeObjectMetadata.get(); + Assert.assertEquals(Optional.of(ETagRawValue()), objectMetadata.getETag()); + Assert.assertEquals(0L, objectMetadata.getContentLength()); + } + + @Test + public void deleteFile() throws Exception { + mockDeleteBlob(); + + //#delete-file + final Source, NotUsed> source = + FileService.deleteFile(containerName() + "/" + blobName(), Optional.empty(), Optional.empty()); + + final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); + //#delete-file + + final var maybeObjectMetadata = optionalCompletionStage.toCompletableFuture().get(); + Assert.assertTrue(maybeObjectMetadata.isPresent()); + final var objectMetadata = maybeObjectMetadata.get(); + Assert.assertEquals(Optional.of(ETagRawValue()), objectMetadata.getETag()); + Assert.assertEquals(0L, objectMetadata.getContentLength()); + } +} diff --git a/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala b/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala new file mode 100644 index 0000000000..b79a166e4d --- /dev/null +++ b/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala @@ -0,0 +1,291 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package docs.scaladsl + +import akka.NotUsed +import akka.http.scaladsl.model.ContentTypes +import akka.stream.alpakka.azure.storage.scaladsl.StorageWireMockBase +import akka.stream.alpakka.azure.storage.scaladsl.StorageWireMockBase.ETagRawValue +import akka.stream.alpakka.testkit.scaladsl.LogCapturing +import akka.stream.scaladsl.{Keep, Sink, Source} +import akka.util.ByteString +import org.scalatest.concurrent.ScalaFutures +import org.scalatest.matchers.should.Matchers +import org.scalatest.wordspec.AnyWordSpecLike +import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, OptionValues} + +import scala.concurrent.Future +import scala.concurrent.duration._ + +class StorageSpec + extends StorageWireMockBase + with AnyWordSpecLike + with BeforeAndAfterAll + with BeforeAndAfterEach + with Matchers + with ScalaFutures + with OptionValues + with LogCapturing { + + override implicit val patienceConfig: PatienceConfig = PatienceConfig(3.minutes, 100.millis) + + override protected def afterEach(): Unit = mock.removeMappings() + + "AzureStorage Blob connector" should { + + "create container" in { + mockCreateContainer() + + //#create-container + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = BlobService.createContainer(containerName) + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) + //#create-container + + val maybeObjectMetadata = eventualMaybeMetadata.futureValue + maybeObjectMetadata shouldBe defined + val objectMetadata = maybeObjectMetadata.get + objectMetadata.contentLength shouldBe 0L + objectMetadata.eTag shouldBe Some(ETagRawValue) + } + + // TODO: There are couple of issues, firstly there are two `Content-Length` headers being added, one by `putBlob` + // function and secondly by, most likely, by WireMock. Need to to figure out how to tell WireMock not to add `Content-Length` + // header, secondly once that resolve then we get `akka.http.scaladsl.model.EntityStreamException`. + "put blob" ignore { + mockPutBlob() + + //#put-blob + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + BlobService.putBlob( + objectPath = s"$containerName/$blobName", + contentType = ContentTypes.`text/plain(UTF-8)`, + contentLength = contentLength, + payload = Source.single(ByteString(payload)) + ) + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) + //#put-blob + + val maybeObjectMetadata = eventualMaybeMetadata.futureValue + maybeObjectMetadata shouldBe defined + val objectMetadata = maybeObjectMetadata.get + objectMetadata.contentLength shouldBe contentLength + objectMetadata.eTag shouldBe Some(ETagRawValue) + } + + "get blob" in { + mockGetBlob() + + //#get-blob + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[ByteString, Future[ObjectMetadata]] = + BlobService.getBlob(objectPath = s"$containerName/$blobName") + + val eventualText = source.toMat(Sink.seq)(Keep.right).run() + //#get-blob + + eventualText.futureValue.map(_.utf8String).mkString("") shouldBe payload + } + + "get blob range" in { + mockGetBlobWithRange() + + //#get-blob-range + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[ByteString, Future[ObjectMetadata]] = + BlobService.getBlob(objectPath = s"$containerName/$blobName", range = Some(subRange)) + + val eventualText: Future[Seq[ByteString]] = source.toMat(Sink.seq)(Keep.right).run() + //#get-blob-range + + eventualText.futureValue.map(_.utf8String).mkString("") shouldBe "quick" + } + + "get blob properties" in { + mockGetBlobProperties(); + + //#get-blob-properties + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + BlobService.getProperties(objectPath = s"$containerName/$blobName") + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) + //#get-blob-properties + + val maybeMetadata = eventualMaybeMetadata.futureValue + maybeMetadata shouldBe defined + val metadata = maybeMetadata.get + metadata.eTag shouldBe Some(ETagRawValue) + metadata.contentLength shouldBe contentLength + metadata.contentType shouldBe Some(ContentTypes.`text/plain(UTF-8)`.value) + } + + "delete blob" in { + mockDeleteBlob() + + //#delete-blob + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + BlobService.deleteBlob(objectPath = s"$containerName/$blobName") + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) + //#delete-blob + + val maybeMetadata = eventualMaybeMetadata.futureValue + maybeMetadata shouldBe defined + val metadata = maybeMetadata.get + metadata.eTag shouldBe Some(ETagRawValue) + metadata.contentLength shouldBe 0L + } + } + + "AzureStorage File connector" should { + + "create file" in { + mockCreateFile() + + //#create-file + import akka.stream.alpakka.azure.storage.scaladsl.FileService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + FileService.createFile(objectPath = s"$containerName/$blobName", + contentType = ContentTypes.`text/plain(UTF-8)`, + maxSize = contentLength) + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) + //#create-file + + val maybeMetadata = eventualMaybeMetadata.futureValue + maybeMetadata shouldBe defined + val metadata = maybeMetadata.get + metadata.eTag shouldBe Some(ETagRawValue) + metadata.contentLength shouldBe 0L + } + + // TODO: There are couple of issues, firstly there are two `Content-Length` headers being added, one by `putBlob` + // function and secondly by, most likely, by WireMock. Need to to figure out how to tell WireMock not to add `Content-Length` + // header, secondly once that resolve then we get `akka.http.scaladsl.model.EntityStreamException`. + "update range" ignore { + mockUpdateRange() + + //#update-range + import akka.stream.alpakka.azure.storage.scaladsl.FileService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + FileService.updateRange( + objectPath = s"$containerName/$blobName", + contentType = ContentTypes.`text/plain(UTF-8)`, + range = contentRange, + payload = Source.single(ByteString(payload)) + ) + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() + //#update-range + + val maybeObjectMetadata = eventualMaybeMetadata.futureValue + maybeObjectMetadata shouldBe defined + val objectMetadata = maybeObjectMetadata.get + objectMetadata.contentLength shouldBe 0 + objectMetadata.eTag shouldBe Some(ETagRawValue) + } + + "get file" in { + mockGetBlob() + + //#get-file + import akka.stream.alpakka.azure.storage.scaladsl.FileService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[ByteString, Future[ObjectMetadata]] = + FileService.getFile(objectPath = s"$containerName/$blobName") + + val eventualText: Future[Seq[ByteString]] = source.toMat(Sink.seq)(Keep.right).run() + //#get-file + + eventualText.futureValue.map(_.utf8String).mkString("") shouldBe payload + } + + "get file properties" in { + mockGetBlobProperties() + + //#get-file-properties + import akka.stream.alpakka.azure.storage.scaladsl.FileService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + FileService.getProperties(objectPath = s"$containerName/$blobName") + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() + //#get-file-properties + + val maybeMetadata = eventualMaybeMetadata.futureValue + maybeMetadata shouldBe defined + val metadata = maybeMetadata.get + metadata.eTag shouldBe Some(ETagRawValue) + metadata.contentLength shouldBe contentLength + metadata.contentType shouldBe Some(ContentTypes.`text/plain(UTF-8)`.value) + } + + // TODO: There are couple of issues, firstly there are two `Content-Length` headers being added, one by `putBlob` + // function and secondly by, most likely, by WireMock. Need to to figure out how to tell WireMock not to add `Content-Length` + // header, secondly once that resolve then we get `akka.http.scaladsl.model.EntityStreamException`. + "clear range" ignore { + mockClearRange() + + //#clear-range + import akka.stream.alpakka.azure.storage.scaladsl.FileService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + FileService.clearRange(objectPath = s"$containerName/$blobName", range = subRange) + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) + //#clear-range + + val maybeMetadata = eventualMaybeMetadata.futureValue + maybeMetadata shouldBe defined + val metadata = maybeMetadata.get + metadata.eTag shouldBe Some(ETagRawValue) + metadata.contentLength shouldBe 0L + } + + "delete file" in { + mockDeleteBlob() + + //#delete-file + import akka.stream.alpakka.azure.storage.scaladsl.FileService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + FileService.deleteFile(objectPath = s"$containerName/$blobName") + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) + //#delete-file + + val maybeMetadata = eventualMaybeMetadata.futureValue + maybeMetadata shouldBe defined + val metadata = maybeMetadata.get + metadata.eTag shouldBe Some(ETagRawValue) + metadata.contentLength shouldBe 0L + } + } +} diff --git a/azure-storage/src/test/scala/docs/scaladsl/StorageTest.scala b/azure-storage/src/test/scala/docs/scaladsl/StorageTest.scala deleted file mode 100644 index e8809c5fb2..0000000000 --- a/azure-storage/src/test/scala/docs/scaladsl/StorageTest.scala +++ /dev/null @@ -1,256 +0,0 @@ -/* - * Copyright (C) since 2016 Lightbend Inc. - */ - -package docs.scaladsl - -import akka.NotUsed -import akka.actor.ActorSystem -import akka.http.scaladsl.model.ContentTypes -import akka.http.scaladsl.model.headers.ByteRange -import akka.stream.alpakka.azure.storage.scaladsl.StorageIntegrationSpec -import akka.stream.scaladsl.{Keep, Sink, Source} -import akka.util.ByteString - -import scala.concurrent.Future - -class StorageTest extends StorageIntegrationSpec { - - override protected implicit val system: ActorSystem = ActorSystem("StorageSystem") - - "AzureStorage Blob connector" should { - - "create container" in { - - //#create-container - import akka.stream.alpakka.azure.storage.scaladsl.BlobService - import akka.stream.alpakka.azure.storage.ObjectMetadata - - val source: Source[Option[ObjectMetadata], NotUsed] = BlobService.createContainer("my-container") - - val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() - //#create-container - - val metaData = eventualMaybeMetadata.futureValue - - // TODO: validate - println(metaData) - } - - "put blob" in { - - //#put-blob - import akka.stream.alpakka.azure.storage.scaladsl.BlobService - import akka.stream.alpakka.azure.storage.ObjectMetadata - - val sampleText = "some random text" - val contentLength = sampleText.length - val payload = Source.single(ByteString.fromString(sampleText)) - val source: Source[Option[ObjectMetadata], NotUsed] = - BlobService.putBlob( - objectPath = "my-container/my-blob.txt", - contentType = ContentTypes.`text/plain(UTF-8)`, - contentLength = contentLength, - payload = payload - ) - - val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() - //#put-blob - - val metaData = eventualMaybeMetadata.futureValue - - // TODO: validate - println(metaData) - } - - "get blob" in { - - //#get-blob - import akka.stream.alpakka.azure.storage.scaladsl.BlobService - import akka.stream.alpakka.azure.storage.ObjectMetadata - - val source: Source[ByteString, Future[ObjectMetadata]] = - BlobService.getBlob(objectPath = "my-container/my-blob.txt") - - val eventualText: Future[ByteString] = source.toMat(Sink.head)(Keep.right).run() - //#get-blob - - val text = eventualText.futureValue - - // TODO: validate - println(text) - } - - "get blob range" in { - - //#get-blob-range - import akka.stream.alpakka.azure.storage.scaladsl.BlobService - import akka.stream.alpakka.azure.storage.ObjectMetadata - - val source: Source[ByteString, Future[ObjectMetadata]] = - BlobService.getBlob(objectPath = "my-container/my-blob.txt", range = Some(ByteRange.Slice(2, 5))) - - val eventualText: Future[ByteString] = source.toMat(Sink.head)(Keep.right).run() - //#get-blob-range - - val text = eventualText.futureValue - - // TODO: validate - println(text) - } - - "get blob properties" in { - - //#get-blob-properties - import akka.stream.alpakka.azure.storage.scaladsl.BlobService - import akka.stream.alpakka.azure.storage.ObjectMetadata - - val source: Source[Option[ObjectMetadata], NotUsed] = - BlobService.getProperties(objectPath = "my-container/my-blob.txt") - - val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() - //#get-blob-properties - - val maybeMetadata = eventualMaybeMetadata.futureValue - - // TODO: validate - println(maybeMetadata) - } - - "delete blob" in { - - //#delete-blob - import akka.stream.alpakka.azure.storage.scaladsl.BlobService - import akka.stream.alpakka.azure.storage.ObjectMetadata - - val source: Source[Option[ObjectMetadata], NotUsed] = - BlobService.deleteBlob(objectPath = "my-container/my-blob.txt") - - val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() - //#delete-blob - - val maybeMetadata = eventualMaybeMetadata.futureValue - - // TODO: validate - println(maybeMetadata) - } - } - - "AzureStorage File connector" should { - - "create file" in { - - //#create-file - import akka.stream.alpakka.azure.storage.scaladsl.FileService - import akka.stream.alpakka.azure.storage.ObjectMetadata - - val source: Source[Option[ObjectMetadata], NotUsed] = - FileService.createFile(objectPath = "/my-directory/my-file.txt", - contentType = ContentTypes.`text/plain(UTF-8)`, - maxSize = 44) - - val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() - //#create-file - - val metaData = eventualMaybeMetadata.futureValue - - // TODO: validate - println(metaData) - } - - "update range" in { - - //#update-range - import akka.stream.alpakka.azure.storage.scaladsl.FileService - import akka.stream.alpakka.azure.storage.ObjectMetadata - - val sampleText = "The quick brown fox jumps over the lazy dog." - val payload = Source.single(ByteString.fromString(sampleText)) - val source: Source[Option[ObjectMetadata], NotUsed] = - FileService.updateRange(objectPath = "/my-directory/my-file.txt", - contentType = ContentTypes.`text/plain(UTF-8)`, - range = ByteRange.Slice(0, sampleText.length - 1), - payload = payload) - - val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() - //#update-range - - val metaData = eventualMaybeMetadata.futureValue - - // TODO: validate - println(metaData) - } - - "get file" in { - - //#get-file - import akka.stream.alpakka.azure.storage.scaladsl.FileService - import akka.stream.alpakka.azure.storage.ObjectMetadata - - val source: Source[ByteString, Future[ObjectMetadata]] = - FileService.getFile(objectPath = "/my-directory/my-file.txt") - - val eventualText: Future[ByteString] = source.toMat(Sink.head)(Keep.right).run() - //#get-file - - val text = eventualText.futureValue - - // TODO: validate - println(text) - } - - "get file properties" in { - - //#get-file-properties - import akka.stream.alpakka.azure.storage.scaladsl.FileService - import akka.stream.alpakka.azure.storage.ObjectMetadata - - val source: Source[Option[ObjectMetadata], NotUsed] = - FileService.getProperties(objectPath = "/my-directory/my-file.txt") - - val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() - //#get-file-properties - - val metaData = eventualMaybeMetadata.futureValue - - // TODO: validate - println(metaData) - } - - "clear range" in { - - //#clear-range - import akka.stream.alpakka.azure.storage.scaladsl.FileService - import akka.stream.alpakka.azure.storage.ObjectMetadata - - val source: Source[Option[ObjectMetadata], NotUsed] = - FileService.clearRange(objectPath = "/my-directory/my-file.txt", range = ByteRange.Slice(5, 11)) - - val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() - //#clear-range - - val metaData = eventualMaybeMetadata.futureValue - - // TODO: validate - println(metaData) - } - - "delete file" in { - - //#delete-file - import akka.stream.alpakka.azure.storage.scaladsl.FileService - import akka.stream.alpakka.azure.storage.ObjectMetadata - - val source: Source[Option[ObjectMetadata], NotUsed] = - FileService.deleteFile(objectPath = "/my-directory/my-file.txt") - - val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() - //#delete-file - - val metaData = eventualMaybeMetadata.futureValue - - // TODO: validate - println(metaData) - } - } -} From 6a5f80c878480a100f8434a16207536f1232f380 Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 12:17:32 -0400 Subject: [PATCH 38/78] fix java file paths #3253 --- docs/src/main/paradox/azure-storage.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/src/main/paradox/azure-storage.md b/docs/src/main/paradox/azure-storage.md index 21b0c2cadf..8de24b9058 100644 --- a/docs/src/main/paradox/azure-storage.md +++ b/docs/src/main/paradox/azure-storage.md @@ -66,7 +66,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #create-container } Java -: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #create-container } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #create-container } ### Put Blob @@ -76,7 +76,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #put-blob } Java -: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #put-blob } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #put-blob } ### Get Blob @@ -86,7 +86,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob } Java -: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #get-blob } In order to download a range of a file's data you can use overloaded method which additionally takes `ByteRange` as argument. @@ -94,7 +94,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob-range } Java -: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob-range } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #get-blob-range } ### Get blob properties without downloading blob @@ -104,7 +104,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob-properties } Java -: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob-properties } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #get-blob-properties } ### Delete Blob @@ -114,7 +114,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #delete-blob } Java -: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #delte-blob } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #delte-blob } ## Supported operations on File service @@ -126,7 +126,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #create-file } Java -: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #create-file } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #create-file } ### Update Range @@ -136,7 +136,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #update-range } Java -: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #update-range } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #update-range } Range can be cleared using `ClearRange` function. @@ -144,7 +144,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #clear-range } Java -: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #clear-range } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #clear-range } ### Get File @@ -154,7 +154,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-file } Java -: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-file } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #get-file } ### Get file properties without downloading blob @@ -164,7 +164,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-file-properties } Java -: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-file-properties } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #get-file-properties } ### Delete Blob @@ -174,4 +174,4 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #delete-file } Java -: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #delte-file } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #delte-file } From b96ed3763d48b4980ba90ddb8fe12a03d8f02fcb Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 12:36:20 -0400 Subject: [PATCH 39/78] fix compilation #3253 --- .../stream/alpakka/azure/storage/scaladsl/FileService.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala index 344cce4f8c..673dc3edd5 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala @@ -96,7 +96,7 @@ object FileService { contentType: ContentType = ContentTypes.`application/octet-stream`, range: ByteRange.Slice, payload: Source[ByteString, _], - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed.type] = + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = AzureStorageStream.updateOrClearRange(objectPath, contentType, range, Some(payload), leaseId) /** @@ -110,6 +110,6 @@ object FileService { */ def clearRange(objectPath: String, range: ByteRange.Slice, - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed.type] = + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = AzureStorageStream.updateOrClearRange(objectPath, ContentTypes.NoContentType, range, None, leaseId) } From bc4508608489282ba0438c5bb85e211b8b8879ac Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 12:44:47 -0400 Subject: [PATCH 40/78] fix compilations #3253 --- .../azure/storage/scaladsl/AzureIntegrationTest.scala | 2 +- .../azure/storage/scaladsl/StorageWireMockBase.scala | 6 +----- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala index 53fce111d7..ee57cd0703 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala @@ -8,7 +8,7 @@ package storage package scaladsl import akka.actor.ActorSystem -import org.junit.Ignore +import org.scalatest.Ignore @Ignore class AzureIntegrationTest extends StorageIntegrationSpec { diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala index 5f2212d092..d1cdc7bf50 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala @@ -9,22 +9,18 @@ package scaladsl import akka.actor.ActorSystem import akka.http.scaladsl.model.headers._ -import StorageWireMockBase.{config, getCallerName, initServer, AccountName} +import StorageWireMockBase.{config, getCallerName, initServer, AccountName, ETagValue} import akka.http.scaladsl.model.ContentTypes import akka.testkit.TestKit import com.github.tomakehurst.wiremock.WireMockServer import com.github.tomakehurst.wiremock.client.WireMock import com.github.tomakehurst.wiremock.client.WireMock._ import com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig -import com.github.tomakehurst.wiremock.http.trafficlistener.ConsoleNotifyingWiremockNetworkTrafficListener -import com.github.tomakehurst.wiremock.matching.StringValuePattern import com.github.tomakehurst.wiremock.stubbing.StubMapping import com.typesafe.config.{Config, ConfigFactory} abstract class StorageWireMockBase(_system: ActorSystem, val _wireMockServer: WireMockServer) extends TestKit(_system) { - import StorageWireMockBase._ - private val port = _wireMockServer.port() protected val mock = new WireMock("localhost", port) From 50061172bd1174e46634173ef04fbc5f331ae5d6 Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 13:12:16 -0400 Subject: [PATCH 41/78] fix documentation #3253 --- docs/src/main/paradox/azure-storage.md | 26 +++++++++++++------------- project/project-info.conf | 18 ++++++++++++++++++ 2 files changed, 31 insertions(+), 13 deletions(-) diff --git a/docs/src/main/paradox/azure-storage.md b/docs/src/main/paradox/azure-storage.md index 8de24b9058..711616bb7d 100644 --- a/docs/src/main/paradox/azure-storage.md +++ b/docs/src/main/paradox/azure-storage.md @@ -18,7 +18,7 @@ Additionally, add the dependencies as below. @@dependency [sbt,Maven,Gradle] { group=com.lightbend.akka -artifact=akka-stream-alpakka-s3_$scala.binary.version$ +artifact=akka-stream-alpakka-azure-storage_$scala.binary.version$ version=$project.version$ symbol2=AkkaVersion value2=$akka.version$ @@ -66,7 +66,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #create-container } Java -: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #create-container } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #create-container } ### Put Blob @@ -76,7 +76,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #put-blob } Java -: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #put-blob } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #put-blob } ### Get Blob @@ -86,7 +86,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob } Java -: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #get-blob } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #get-blob } In order to download a range of a file's data you can use overloaded method which additionally takes `ByteRange` as argument. @@ -94,7 +94,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob-range } Java -: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #get-blob-range } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #get-blob-range } ### Get blob properties without downloading blob @@ -104,7 +104,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob-properties } Java -: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #get-blob-properties } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #get-blob-properties } ### Delete Blob @@ -114,7 +114,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #delete-blob } Java -: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #delte-blob } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #delete-blob } ## Supported operations on File service @@ -126,7 +126,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #create-file } Java -: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #create-file } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #create-file } ### Update Range @@ -136,7 +136,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #update-range } Java -: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #update-range } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #update-range } Range can be cleared using `ClearRange` function. @@ -144,7 +144,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #clear-range } Java -: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #clear-range } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #clear-range } ### Get File @@ -154,7 +154,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-file } Java -: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #get-file } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #get-file } ### Get file properties without downloading blob @@ -164,7 +164,7 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-file-properties } Java -: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #get-file-properties } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #get-file-properties } ### Delete Blob @@ -174,4 +174,4 @@ Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #delete-file } Java -: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.scala) { #delte-file } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #delete-file } diff --git a/project/project-info.conf b/project/project-info.conf index ba9f93499e..6bf522d117 100644 --- a/project/project-info.conf +++ b/project/project-info.conf @@ -96,6 +96,24 @@ project-info { } ] } + azure-storage: ${project-info.shared-info} { + title: "Alpakka Azure Storage" + jpms-name: "akka.stream.alpakka.azure.storage" + issues.url: ${project-info.labels}"azure-storage" + levels: [ + { + readiness: CommunityDriven + since: "2024-08-28" + since-version: "8.0.0" + } + ] + api-docs: [ + { + url: ${project-info.scaladoc}"azure/storage/index.html" + text: "API (Scaladoc)" + } + ] + } azure-storage-queue: ${project-info.shared-info} { title: "Alpakka Azure Storage Queue" jpms-name: "akka.stream.alpakka.azure.storagequeue" From 9be3bf85e1e54e36e7e9936b25964e22ee9ecb92 Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 13:47:54 -0400 Subject: [PATCH 42/78] minor typo #3253 --- docs/src/main/paradox/azure-storage.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/main/paradox/azure-storage.md b/docs/src/main/paradox/azure-storage.md index 711616bb7d..b197499408 100644 --- a/docs/src/main/paradox/azure-storage.md +++ b/docs/src/main/paradox/azure-storage.md @@ -98,7 +98,7 @@ Java ### Get blob properties without downloading blob -The [`Get Blob Properties`](https://learn.microsoft.com/en-us/rest/api/storageservices/get-blob-properties) operation returns all user-defined metadata, standard HTTP properties, and system properties for the blob. (**Note:** Current implementation does not return user-defined metatdata.) +The [`Get Blob Properties`](https://learn.microsoft.com/en-us/rest/api/storageservices/get-blob-properties) operation returns all user-defined metadata, standard HTTP properties, and system properties for the blob. (**Note:** Current implementation does not return user-defined metadata.) Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #get-blob-properties } From b8600fc897eb3c4167a29a29a6e6a19761e053e2 Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 13:48:49 -0400 Subject: [PATCH 43/78] extracted some constants to be used in sub classes #3253 --- .../scaladsl/StorageIntegrationSpec.scala | 38 ++++++++++--------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala index 8854127daa..60ad5eb72a 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala @@ -7,14 +7,14 @@ package azure package storage package scaladsl -import akka.Done +import akka.{Done, NotUsed} import akka.actor.ActorSystem import akka.http.scaladsl.Http import akka.http.scaladsl.model.ContentTypes import akka.http.scaladsl.model.headers.ByteRange import akka.stream.Attributes import akka.stream.alpakka.testkit.scaladsl.LogCapturing -import akka.stream.scaladsl.{Framing, Keep, Sink, Source} +import akka.stream.scaladsl.{Flow, Framing, Keep, Sink, Source} import akka.testkit.TestKit import akka.util.ByteString import org.scalatest.concurrent.ScalaFutures @@ -38,6 +38,10 @@ trait StorageIntegrationSpec protected val defaultContainerName = "test-container" protected val fileName = "sample-blob.txt" protected val sampleText: String = "The quick brown fox jumps over the lazy dog." + System.lineSeparator() + protected val contentLength: Long = sampleText.length.toLong + protected val objectPath = s"$defaultContainerName/$fileName" + protected val framing: Flow[ByteString, ByteString, NotUsed] = + Framing.delimiter(ByteString(System.lineSeparator()), 256, allowTruncation = true) protected implicit val system: ActorSystem protected implicit lazy val ec: ExecutionContext = system.dispatcher @@ -55,14 +59,13 @@ trait StorageIntegrationSpec val maybeObjectMetadata = BlobService .putBlob( - objectPath = s"$defaultContainerName/$fileName", + objectPath = objectPath, contentType = ContentTypes.`text/plain(UTF-8)`, - contentLength = sampleText.length, - payload = Source.single(ByteString.fromString(sampleText)) + contentLength = contentLength, + payload = Source.single(ByteString(sampleText)) ) .withAttributes(getDefaultAttributes) - .toMat(Sink.head)(Keep.right) - .run() + .runWith(Sink.head) .futureValue maybeObjectMetadata shouldBe defined @@ -73,9 +76,9 @@ trait StorageIntegrationSpec "get blob" in { val (maybeEventualObjectMetadata, eventualText) = BlobService - .getBlob(s"$defaultContainerName/$fileName") + .getBlob(objectPath) .withAttributes(getDefaultAttributes) - .via(Framing.delimiter(ByteString(System.lineSeparator()), 256, allowTruncation = true)) + .via(framing) .map(byteString => byteString.utf8String + System.lineSeparator()) .toMat(Sink.seq)(Keep.both) .run() @@ -83,16 +86,15 @@ trait StorageIntegrationSpec val objectMetadata = maybeEventualObjectMetadata.futureValue objectMetadata.contentMd5 shouldBe Some(calculateDigest(sampleText)) objectMetadata.contentLength shouldBe sampleText.length - eventualText.futureValue.head shouldBe sampleText + eventualText.futureValue.mkString("") shouldBe sampleText } "get blob properties" in { val maybeObjectMetadata = BlobService - .getProperties(s"$defaultContainerName/$fileName") + .getProperties(objectPath) .withAttributes(getDefaultAttributes) - .toMat(Sink.head)(Keep.right) - .run() + .runWith(Sink.head) .futureValue maybeObjectMetadata shouldBe defined @@ -105,9 +107,9 @@ trait StorageIntegrationSpec val range = ByteRange.Slice(0, 8) val (maybeEventualObjectMetadata, eventualText) = BlobService - .getBlob(s"$defaultContainerName/$fileName", Some(range)) + .getBlob(objectPath, Some(range)) .withAttributes(getDefaultAttributes) - .via(Framing.delimiter(ByteString(System.lineSeparator()), 256, allowTruncation = true)) + .via(framing) .map(_.utf8String) .toMat(Sink.seq)(Keep.both) .run() @@ -120,7 +122,7 @@ trait StorageIntegrationSpec "delete blob" in { val maybeObjectMetadata = BlobService - .deleteBlob(s"$defaultContainerName/$fileName") + .deleteBlob(objectPath) .withAttributes(getDefaultAttributes) .toMat(Sink.head)(Keep.right) .run() @@ -132,7 +134,7 @@ trait StorageIntegrationSpec "get blob after delete" in { val maybeObjectMetadata = BlobService - .getProperties(s"$defaultContainerName/$fileName") + .getProperties(objectPath) .withAttributes(getDefaultAttributes) .toMat(Sink.head)(Keep.right) .run() @@ -149,7 +151,7 @@ trait StorageIntegrationSpec .runWith(Sink.ignore) } - private def calculateDigest(text: String) = { + protected def calculateDigest(text: String): String = { val digest = MessageDigest.getInstance("MD5") digest.update(text.getBytes) val bytes = digest.digest() From be29661c3e223f1872d0f68a86e51e6acd5fcc35 Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 13:49:04 -0400 Subject: [PATCH 44/78] FileService test #3253 --- .../scaladsl/AzureIntegrationTest.scala | 112 ++++++++++++++++++ 1 file changed, 112 insertions(+) diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala index ee57cd0703..1c2dfaa8a8 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala @@ -8,6 +8,10 @@ package storage package scaladsl import akka.actor.ActorSystem +import akka.http.scaladsl.model.ContentTypes +import akka.http.scaladsl.model.headers.ByteRange +import akka.stream.scaladsl.{Keep, Sink, Source} +import akka.util.ByteString import org.scalatest.Ignore @Ignore @@ -15,4 +19,112 @@ class AzureIntegrationTest extends StorageIntegrationSpec { override protected implicit val system: ActorSystem = ActorSystem("AzureIntegrationTest") + // Azurite doesn't support FileService yet so can't add these tests in the base class + "FileService" should { + "create file" in { + val maybeObjectMetadata = FileService + .createFile(objectPath = objectPath, contentType = ContentTypes.`text/plain(UTF-8)`, maxSize = contentLength) + .withAttributes(getDefaultAttributes) + .runWith(Sink.head) + .futureValue + + maybeObjectMetadata.isDefined shouldBe true + val objectMetadata = maybeObjectMetadata.get + objectMetadata.contentLength shouldBe 0 + } + + "put range" in { + val maybeObjectMetadata = FileService + .updateRange(objectPath = objectPath, + contentType = ContentTypes.`text/plain(UTF-8)`, + range = ByteRange(0, contentLength - 1), + payload = Source.single(ByteString(sampleText))) + .withAttributes(getDefaultAttributes) + .runWith(Sink.head) + .futureValue + + maybeObjectMetadata.isDefined shouldBe true + val objectMetadata = maybeObjectMetadata.get + objectMetadata.contentLength shouldBe 0 + } + + "get file" in { + val (maybeEventualObjectMetadata, eventualText) = FileService + .getFile(objectPath = objectPath) + .via(framing) + .map(byteString => byteString.utf8String + System.lineSeparator()) + .toMat(Sink.seq)(Keep.both) + .run() + + val objectMetadata = maybeEventualObjectMetadata.futureValue + objectMetadata.contentType shouldBe Some(ContentTypes.`text/plain(UTF-8)`.value) + objectMetadata.contentLength shouldBe sampleText.length + eventualText.futureValue.mkString("") shouldBe sampleText + } + + "get file properties" in { + val maybeObjectMetadata = + FileService + .getProperties(objectPath) + .withAttributes(getDefaultAttributes) + .runWith(Sink.head) + .futureValue + + maybeObjectMetadata shouldBe defined + val objectMetadata = maybeObjectMetadata.get + objectMetadata.contentType shouldBe Some(ContentTypes.`text/plain(UTF-8)`.value) + objectMetadata.contentLength shouldBe sampleText.length + } + + "get file range" in { + val range = ByteRange(0, 8) + val (maybeEventualObjectMetadata, eventualText) = + FileService + .getFile(objectPath, Some(range)) + .withAttributes(getDefaultAttributes) + .via(framing) + .map(_.utf8String) + .toMat(Sink.seq)(Keep.both) + .run() + + val objectMetadata = maybeEventualObjectMetadata.futureValue + objectMetadata.contentLength shouldBe (range.last - range.first + 1) + eventualText.futureValue.mkString("") shouldBe "The quick" + } + + "clear range" in { + val range = ByteRange(16, 24) + val maybeObjectMetadata = FileService + .clearRange(objectPath = objectPath, range = range) + .withAttributes(getDefaultAttributes) + .runWith(Sink.head) + .futureValue + + maybeObjectMetadata.get.contentLength shouldBe 0 + } + + "delete file" in { + val maybeObjectMetadata = + FileService + .deleteFile(objectPath) + .withAttributes(getDefaultAttributes) + .toMat(Sink.head)(Keep.right) + .run() + .futureValue + + maybeObjectMetadata.get.contentLength shouldBe 0 + } + + "get file after delete" in { + val maybeObjectMetadata = + FileService + .getProperties(objectPath) + .withAttributes(getDefaultAttributes) + .toMat(Sink.head)(Keep.right) + .run() + .futureValue + + maybeObjectMetadata shouldBe empty + } + } } From 49deaae938616e7c0eeeccd575db500fe4475b79 Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 19:07:48 -0400 Subject: [PATCH 45/78] introducing factory class to create headers #3253 --- .../azure/storage/StorageHeaders.scala | 147 ++++++++++++++++++ .../azure/storage/headers/headers.scala | 51 ++++++ .../alpakka/azure/storage/package.scala | 5 + 3 files changed, 203 insertions(+) create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/headers.scala diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala new file mode 100644 index 0000000000..fbe16c4083 --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala @@ -0,0 +1,147 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage + +import akka.http.scaladsl.model.headers.{ByteRange, RawHeader, Range => RangeHeader} +import akka.http.scaladsl.model.{ContentType, HttpHeader} +import headers.{BlobTypeHeader, CustomContentLengthHeader, CustomContentTypeHeader, FileWriteTypeHeader} + +import java.util.Objects + +private[storage] class StorageHeaders private (val contentLengthHeader: Option[HttpHeader] = None, + val contentTypeHeader: Option[HttpHeader] = None, + val rangeHeader: Option[HttpHeader] = None, + val blobTypeHeader: Option[HttpHeader] = None, + val leaseIdHeader: Option[HttpHeader] = None, + val fileWriteTypeHeader: Option[HttpHeader] = None, + val fileTypeHeader: Option[HttpHeader] = None, + val fileMaxContentLengthHeader: Option[HttpHeader] = None, + val pageBlobContentLengthHeader: Option[HttpHeader] = None, + val pageBlobSequenceNumberHeader: Option[HttpHeader] = None) { + + private[storage] def headers: Seq[HttpHeader] = + (contentLengthHeader ++ + contentTypeHeader ++ + rangeHeader ++ + blobTypeHeader ++ + leaseIdHeader ++ + fileWriteTypeHeader ++ + fileTypeHeader ++ + fileMaxContentLengthHeader ++ + pageBlobContentLengthHeader ++ + pageBlobSequenceNumberHeader).toSeq + + private[storage] def withContentLengthHeader(contentLength: Long): StorageHeaders = + copy(contentLengthHeader = Some(CustomContentLengthHeader(contentLength))) + + private[storage] def withContentTypeHeader(contentType: ContentType): StorageHeaders = + copy(contentTypeHeader = Some(CustomContentTypeHeader(contentType))) + + private[storage] def withRangeHeader(range: ByteRange): StorageHeaders = + copy(rangeHeader = Some(RangeHeader(range))) + + private[storage] def withRangeHeader(range: Option[ByteRange]): StorageHeaders = + copy(rangeHeader = range.map(value => RangeHeader(value))) + + private[storage] def withBlobTypeHeader(blobTypeHeader: BlobTypeHeader): StorageHeaders = + copy(blobTypeHeader = Some(blobTypeHeader.header)) + + private[storage] def withLeaseIdHeader(leaseId: Option[String]): StorageHeaders = + copy(leaseIdHeader = leaseId.map(value => RawHeader(LeaseIdHeaderKey, value))) + + private[storage] def withFileWriteTypeHeader(fileWriteTypeHeader: FileWriteTypeHeader): StorageHeaders = + copy(fileWriteTypeHeader = Some(fileWriteTypeHeader.header)) + + private[storage] def withFileTypeHeader(): StorageHeaders = + copy(fileTypeHeader = Some(RawHeader(FileTypeHeaderKey, "file"))) + + private[storage] def withFileMaxContentLengthHeader(contentLength: Long): StorageHeaders = + copy(fileMaxContentLengthHeader = Some(RawHeader(XMsContentLengthHeaderKey, contentLength.toString))) + + private[storage] def withPageBlobContentLengthHeader(contentLength: Long): StorageHeaders = + copy(pageBlobContentLengthHeader = Some(RawHeader(PageBlobContentLengthHeaderKey, contentLength.toString))) + + private[storage] def withPageBlobSequenceNumberHeader(sequenceNumber: Int): StorageHeaders = + copy(pageBlobSequenceNumberHeader = Some(RawHeader(PageBlobSequenceNumberHeaderKey, sequenceNumber.toString))) + + private def copy(contentLengthHeader: Option[HttpHeader] = contentLengthHeader, + contentTypeHeader: Option[HttpHeader] = contentTypeHeader, + rangeHeader: Option[HttpHeader] = rangeHeader, + blobTypeHeader: Option[HttpHeader] = blobTypeHeader, + leaseIdHeader: Option[HttpHeader] = leaseIdHeader, + fileWriteTypeHeader: Option[HttpHeader] = fileWriteTypeHeader, + fileTypeHeader: Option[HttpHeader] = fileTypeHeader, + fileMaxContentLengthHeader: Option[HttpHeader] = fileMaxContentLengthHeader, + pageBlobContentLengthHeader: Option[HttpHeader] = pageBlobContentLengthHeader, + pageBlobSequenceNumberHeader: Option[HttpHeader] = pageBlobSequenceNumberHeader) = + new StorageHeaders( + contentLengthHeader = contentLengthHeader, + contentTypeHeader = contentTypeHeader, + rangeHeader = rangeHeader, + blobTypeHeader = blobTypeHeader, + leaseIdHeader = leaseIdHeader, + fileWriteTypeHeader = fileWriteTypeHeader, + fileTypeHeader = fileTypeHeader, + fileMaxContentLengthHeader = fileMaxContentLengthHeader, + pageBlobContentLengthHeader = pageBlobContentLengthHeader, + pageBlobSequenceNumberHeader = pageBlobSequenceNumberHeader + ) + + override def toString: String = + s"""StorageHeaders( + |contentLengthHeader=${contentLengthHeader.map(_.value()).getOrElse("None")}, + | contentTypeHeader=${contentTypeHeader.map(_.value()).getOrElse("None")}, + | rangeHeader=${rangeHeader.map(_.value()).getOrElse("None")}, + | blobTypeHeader=${blobTypeHeader.map(_.value()).getOrElse("None")}, + | leaseIdHeader=${leaseIdHeader.map(_.value()).getOrElse("None")}, + | fileWriteTypeHeader=${fileWriteTypeHeader.map(_.value()).getOrElse("None")}, + | fileTypeHeader=${fileTypeHeader.map(_.value()).getOrElse("None")}, + | fileMaxContentLengthHeader=${fileMaxContentLengthHeader.map(_.value()).getOrElse("None")}, + | pageBlobContentLengthHeader=${pageBlobContentLengthHeader.map(_.value()).getOrElse("None")}, + | pageBlobSequenceNumberHeader=${pageBlobSequenceNumberHeader.map(_.value()).getOrElse("None")} + |)""".stripMargin.replaceAll(System.lineSeparator(), "") + + override def equals(obj: Any): Boolean = + obj match { + case other: StorageHeaders => + Objects.equals(contentLengthHeader, other.contentLengthHeader) && + Objects.equals(contentTypeHeader, other.contentTypeHeader) && + Objects.equals(rangeHeader, other.rangeHeader) && + Objects.equals(blobTypeHeader, other.blobTypeHeader) && + Objects.equals(leaseIdHeader, other.leaseIdHeader) && + Objects.equals(fileWriteTypeHeader, other.fileWriteTypeHeader) && + Objects.equals(fileMaxContentLengthHeader, other.fileMaxContentLengthHeader) && + Objects.equals(pageBlobContentLengthHeader, other.pageBlobContentLengthHeader) && + Objects.equals(pageBlobSequenceNumberHeader, other.pageBlobSequenceNumberHeader) + case _ => false + } + + override def hashCode(): Int = + Objects.hash( + contentLengthHeader, + contentTypeHeader, + rangeHeader, + blobTypeHeader, + leaseIdHeader, + fileWriteTypeHeader, + fileTypeHeader, + fileMaxContentLengthHeader, + pageBlobContentLengthHeader, + pageBlobSequenceNumberHeader + ) +} + +private[storage] object StorageHeaders { + private[storage] val Empty = new StorageHeaders() + + private[storage] def apply(): StorageHeaders = Empty + + /** + * Java Api + */ + def create(): StorageHeaders = Empty +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/headers.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/headers.scala new file mode 100644 index 0000000000..bfb1e09db9 --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/headers.scala @@ -0,0 +1,51 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package headers + +import akka.annotation.InternalApi +import akka.http.scaladsl.model.{ContentType, HttpHeader} +import akka.http.scaladsl.model.headers.{CustomHeader, RawHeader} + +private[storage] case class CustomContentTypeHeader(contentType: ContentType) extends CustomHeader { + override def name(): String = "Content-Type" + + override def value(): String = contentType.value + + override def renderInRequests(): Boolean = true + + override def renderInResponses(): Boolean = true +} + +private[storage] case class CustomContentLengthHeader(contentLength: Long) extends CustomHeader { + override def name(): String = "Content-Length" + + override def value(): String = contentLength.toString + + override def renderInRequests(): Boolean = true + + override def renderInResponses(): Boolean = true +} + +private[storage] case class BlobTypeHeader(blobType: String) { + @InternalApi private[storage] def header: HttpHeader = RawHeader(BlobTypeHeaderKey, blobType) +} + +object BlobTypeHeader { + private[storage] val BlockBlobHeader = new BlobTypeHeader(BlockBlobType) + private[storage] val PageBlobHeader = new BlobTypeHeader(PageBlobType) + private[storage] val AppendBlobHeader = new BlobTypeHeader(AppendBlobType) +} + +private[storage] case class FileWriteTypeHeader(writeType: String) { + @InternalApi private[storage] def header: HttpHeader = RawHeader(FileWriteTypeHeaderKey, writeType) +} + +object FileWriteTypeHeader { + private[storage] val UpdateFileHeader = new FileWriteTypeHeader("update") + private[storage] val ClearFileHeader = new FileWriteTypeHeader("clear") +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala index 402660bbd8..0595bacf3a 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala @@ -21,12 +21,17 @@ package object storage { private[storage] val FileWriteTypeHeaderKey = "x-ms-write" private[storage] val XMsContentLengthHeaderKey = "x-ms-content-length" private[storage] val FileTypeHeaderKey = "x-ms-type" + private[storage] val PageBlobContentLengthHeaderKey = "x-ms-blob-content-length" + private[storage] val PageBlobSequenceNumberHeaderKey = "x-ms-blob-sequence-number" private[storage] val AnonymousAuthorizationType = "anon" private[storage] val SharedKeyAuthorizationType = "SharedKey" private[storage] val SharedKeyLiteAuthorizationType = "SharedKeyLite" private[storage] val SasAuthorizationType = "sas" private[storage] val BlobType = "blob" private[storage] val FileType = "file" + private[storage] val BlockBlobType = "BlockBlob" + private[storage] val PageBlobType = "PageBlob" + private[storage] val AppendBlobType = "AppendBlob" private[storage] def getFormattedDate: String = DateTimeFormatter.RFC_1123_DATE_TIME.format(ZonedDateTime.now(ZoneOffset.UTC)) From cf48d55538bc25d94d5792fb572a38fc371685bc Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 19:11:36 -0400 Subject: [PATCH 46/78] updated implementation to get headers from dsl #3253 1. DSL to create headers based on function and pass it to implementation 2. Remove "bobType" from "putBlob", separate functions will be introduced for page and append blocks --- .../storage/impl/AzureStorageStream.scala | 114 ++++++------------ .../azure/storage/javadsl/BlobService.scala | 57 ++++++--- .../azure/storage/javadsl/FileService.scala | 85 ++++++++++--- .../azure/storage/scaladsl/BlobService.scala | 31 +++-- .../azure/storage/scaladsl/FileService.scala | 54 +++++++-- .../test/java/docs/javadsl/StorageTest.java | 2 +- 6 files changed, 216 insertions(+), 127 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index 8656934980..900f433b7f 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -12,17 +12,9 @@ import akka.actor.ActorSystem import akka.dispatch.ExecutionContexts import akka.http.scaladsl.Http import akka.http.scaladsl.model.StatusCodes.{Accepted, Created, NotFound, OK} -import akka.http.scaladsl.model.headers.{ - ByteRange, - CustomHeader, - RawHeader, - `Content-Length`, - `Content-Type`, - Range => RangeHeader -} +import akka.http.scaladsl.model.headers.{`Content-Length`, `Content-Type`, CustomHeader} import akka.http.scaladsl.model.{ ContentType, - HttpEntity, HttpHeader, HttpMethod, HttpMethods, @@ -30,6 +22,7 @@ import akka.http.scaladsl.model.{ HttpResponse, ResponseEntity, StatusCode, + UniversalEntity, Uri } import akka.http.scaladsl.unmarshalling.Unmarshal @@ -46,9 +39,8 @@ object AzureStorageStream { private[storage] def getObject(storageType: String, objectPath: String, - range: Option[ByteRange], versionId: Option[String], - leaseId: Option[String]): Source[ByteString, Future[ObjectMetadata]] = { + headers: Seq[HttpHeader]): Source[ByteString, Future[ObjectMetadata]] = { Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system @@ -60,7 +52,7 @@ object AzureStorageStream { storageType = storageType, objectPath = objectPath, queryString = createQueryString(settings, versionId.map(value => s"versionId=$value"))), - headers = populateCommonHeaders(HttpEntity.Empty, range = range, leaseId = leaseId) + headers = headers ) val objectMetadataMat = Promise[ObjectMetadata]() signAndRequest(request, settings)(mat.system) @@ -84,7 +76,7 @@ object AzureStorageStream { private[storage] def getObjectProperties(storageType: String, objectPath: String, versionId: Option[String], - leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed] = { + headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system @@ -97,7 +89,7 @@ object AzureStorageStream { storageType = storageType, objectPath = objectPath, queryString = createQueryString(settings, versionId.map(value => s"versionId=$value"))), - headers = populateCommonHeaders(HttpEntity.Empty, leaseId = leaseId) + headers = headers ) signAndRequest(request, settings) @@ -117,7 +109,7 @@ object AzureStorageStream { private[storage] def deleteObject(storageType: String, objectPath: String, versionId: Option[String], - leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed] = { + headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system @@ -130,7 +122,7 @@ object AzureStorageStream { storageType = storageType, objectPath = objectPath, queryString = createQueryString(settings, versionId.map(value => s"versionId=$value"))), - headers = populateCommonHeaders(HttpEntity.Empty, leaseId = leaseId) + headers = headers ) signAndRequest(request, settings) @@ -147,17 +139,13 @@ object AzureStorageStream { .mapMaterializedValue(_ => NotUsed) } - private[storage] def putBlob(blobType: String, - objectPath: String, - contentType: ContentType, - contentLength: Long, - payload: Source[ByteString, _], - leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed] = { + private[storage] def putBlob(objectPath: String, + httpEntity: UniversalEntity, + headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system val settings = resolveSettings(attr, system) - val httpEntity = HttpEntity(contentType, contentLength, payload) val request = createRequest( method = HttpMethods.PUT, @@ -165,7 +153,7 @@ object AzureStorageStream { storageType = BlobType, objectPath = objectPath, queryString = createQueryString(settings)), - headers = populateCommonHeaders(httpEntity, blobType = Some(blobType), leaseId = leaseId) + headers = headers ).withEntity(httpEntity) handlePutRequest(request, settings) } @@ -173,9 +161,7 @@ object AzureStorageStream { } private[storage] def createFile(objectPath: String, - contentType: ContentType, - maxSize: Long, - leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed] = { + headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system @@ -187,32 +173,20 @@ object AzureStorageStream { storageType = FileType, objectPath = objectPath, queryString = createQueryString(settings)), - headers = Seq( - CustomContentTypeHeader(contentType), - RawHeader(XMsContentLengthHeaderKey, maxSize.toString), - RawHeader(FileTypeHeaderKey, "file") - ) ++ leaseId.map(value => RawHeader(LeaseIdHeaderKey, value)) + headers = headers ) handlePutRequest(request, settings) } .mapMaterializedValue(_ => NotUsed) } - private[storage] def updateOrClearRange(objectPath: String, - contentType: ContentType, - range: ByteRange.Slice, - payload: Option[Source[ByteString, _]], - leaseId: Option[String]): Source[Option[ObjectMetadata], NotUsed] = { + private[storage] def updateRange(objectPath: String, + httpEntity: UniversalEntity, + headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system val settings = resolveSettings(attr, system) - val contentLength = range.last - range.first + 1 - val clearRange = payload.isEmpty - val writeType = if (clearRange) "clear" else "update" - val overrideContentLength = if (clearRange) Some(0L) else None - val httpEntity = - if (clearRange) HttpEntity.empty(contentType) else HttpEntity(contentType, contentLength, payload.get) val request = createRequest( method = HttpMethods.PUT, @@ -220,17 +194,33 @@ object AzureStorageStream { storageType = FileType, objectPath = objectPath, queryString = createQueryString(settings, Some("comp=range"))), - headers = populateCommonHeaders(httpEntity, - overrideContentLength, - range = Some(range), - leaseId = leaseId, - writeType = Some(writeType)) + headers = headers ).withEntity(httpEntity) handlePutRequest(request, settings) } .mapMaterializedValue(_ => NotUsed) } + private[storage] def clearRange(objectPath: String, + headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { + Source + .fromMaterializer { (mat, attr) => + implicit val system: ActorSystem = mat.system + val settings = resolveSettings(attr, system) + val request = + createRequest( + method = HttpMethods.PUT, + uri = createUri(settings = settings, + storageType = FileType, + objectPath = objectPath, + queryString = createQueryString(settings, Some("comp=range"))), + headers = headers + ) + handlePutRequest(request, settings) + } + .mapMaterializedValue(_ => NotUsed) + } + private[storage] def createContainer(objectPath: String): Source[Option[ObjectMetadata], NotUsed] = Source .fromMaterializer { (mat, attr) => @@ -243,7 +233,7 @@ object AzureStorageStream { storageType = BlobType, objectPath = objectPath, queryString = createQueryString(settings, Some("restype=container"))), - headers = populateCommonHeaders(HttpEntity.Empty) + headers = StorageHeaders().headers ) handlePutRequest(request, settings) @@ -345,32 +335,6 @@ object AzureStorageStream { } } - private def populateCommonHeaders(entity: HttpEntity, - overrideContentLength: Option[Long] = None, - range: Option[ByteRange] = None, - blobType: Option[String] = None, - leaseId: Option[String] = None, - writeType: Option[String] = None) = { - // Azure required to have these two headers (Content-Length & Content-Type) in the request - // in some cases Content-Length header must be set as 0 - val contentLength = overrideContentLength.orElse(entity.contentLengthOption).getOrElse(0L) - val maybeContentLengthHeader = - if (overrideContentLength.isEmpty && contentLength == 0L) None else Some(CustomContentLengthHeader(contentLength)) - - val maybeContentTypeHeader = - emptyStringToOption(entity.contentType.toString()) match { - case Some(value) if value != "none/none" => Some(CustomContentTypeHeader(entity.contentType)) - case _ => None - } - - val maybeRangeHeader = range.map(RangeHeader(_)) - val maybeBlobTypeHeader = blobType.map(value => RawHeader(BlobTypeHeaderKey, value)) - val maybeLeaseIdHeader = leaseId.map(value => RawHeader(LeaseIdHeaderKey, value)) - val maybeWriteTypeHeader = writeType.map(value => RawHeader(FileWriteTypeHeaderKey, value)) - - (maybeContentLengthHeader ++ maybeContentTypeHeader ++ maybeRangeHeader ++ maybeBlobTypeHeader ++ maybeLeaseIdHeader ++ maybeWriteTypeHeader).toSeq - } - private def createRequest(method: HttpMethod, uri: Uri, headers: Seq[HttpHeader]) = HttpRequest(method = method, uri = uri, headers = headers) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala index 720f7a7d5f..0a195aa4bc 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala @@ -11,7 +11,8 @@ import akka.NotUsed import akka.http.javadsl.model._ import akka.http.javadsl.model.headers.ByteRange import akka.http.scaladsl.model.headers.{ByteRange => ScalaByteRange} -import akka.http.scaladsl.model.{ContentType => ScalaContentType} +import akka.http.scaladsl.model.{HttpEntity, ContentType => ScalaContentType} +import akka.stream.alpakka.azure.storage.headers.BlobTypeHeader import akka.stream.alpakka.azure.storage.impl.AzureStorageStream import akka.stream.javadsl.Source import akka.stream.scaladsl.SourceToCompletionStage @@ -38,14 +39,21 @@ object BlobService { def getBlob(objectPath: String, range: ByteRange, versionId: Optional[String], - leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = { - val scalaRange = range.asInstanceOf[ScalaByteRange] + leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = new Source( AzureStorageStream - .getObject(BlobType, objectPath, Some(scalaRange), Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .getObject( + BlobType, + objectPath, + Option(versionId.orElse(null)), + StorageHeaders + .create() + .withRangeHeader(range.asInstanceOf[ScalaByteRange]) + .withLeaseIdHeader(Option(leaseId.orElse(null))) + .headers + ) .toCompletionStage() ) - } /** * Gets blob representing `objectPath` with specified range (if applicable). @@ -61,7 +69,15 @@ object BlobService { leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = new Source( AzureStorageStream - .getObject(BlobType, objectPath, None, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .getObject( + BlobType, + objectPath, + Option(versionId.orElse(null)), + StorageHeaders + .create() + .withLeaseIdHeader(Option(leaseId.orElse(null))) + .headers + ) .toCompletionStage() ) @@ -78,7 +94,10 @@ object BlobService { versionId: Optional[String], leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .getObjectProperties(BlobType, objectPath, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .getObjectProperties(BlobType, + objectPath, + Option(versionId.orElse(null)), + StorageHeaders.create().withLeaseIdHeader(Option(leaseId.orElse(null))).headers) .map(opt => Optional.ofNullable(opt.orNull)) .asJava @@ -95,7 +114,10 @@ object BlobService { versionId: Optional[String], leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .deleteObject(BlobType, objectPath, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .deleteObject(BlobType, + objectPath, + Option(versionId.orElse(null)), + StorageHeaders.create().withLeaseIdHeader(Option(leaseId.orElse(null))).headers) .map(opt => Optional.ofNullable(opt.orNull)) .asJava @@ -106,7 +128,6 @@ object BlobService { * @param contentType content type of the blob * @param contentLength length of the blob * @param payload actual payload, a [[akka.stream.javadsl.Source Source]] of [[akka.util.ByteString ByteString]] - * @param blobType type of the blob, ''Must be one of:'' __'''BlockBlob, PageBlob, or AppendBlob'''__ * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ @@ -114,15 +135,19 @@ object BlobService { contentType: ContentType, contentLength: Long, payload: Source[ByteString, _], - blobType: String = "BlockBlob", leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .putBlob(blobType, - objectPath, - contentType.asInstanceOf[ScalaContentType], - contentLength, - payload.asScala, - Option(leaseId.orElse(null))) + .putBlob( + objectPath, + HttpEntity(contentType.asInstanceOf[ScalaContentType], contentLength, payload.asScala), + StorageHeaders + .create() + .withContentLengthHeader(contentLength) + .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) + .withLeaseIdHeader(Option(leaseId.orElse(null))) + .withBlobTypeHeader(BlobTypeHeader.BlockBlobHeader) + .headers + ) .map(opt => Optional.ofNullable(opt.orNull)) .asJava diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala index fe6c22898c..1300f1a75c 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala @@ -12,7 +12,8 @@ import akka.http.javadsl.model._ import akka.http.javadsl.model.headers.ByteRange import akka.http.scaladsl.model.headers.ByteRange.Slice import akka.http.scaladsl.model.headers.{ByteRange => ScalaByteRange} -import akka.http.scaladsl.model.{ContentType => ScalaContentType} +import akka.http.scaladsl.model.{HttpEntity, ContentType => ScalaContentType} +import akka.stream.alpakka.azure.storage.headers.FileWriteTypeHeader import akka.stream.alpakka.azure.storage.impl.AzureStorageStream import akka.stream.javadsl.Source import akka.stream.scaladsl.SourceToCompletionStage @@ -39,14 +40,21 @@ object FileService { def getFile(objectPath: String, range: ByteRange, versionId: Optional[String], - leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = { - val scalaRange = range.asInstanceOf[ScalaByteRange] + leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = new Source( AzureStorageStream - .getObject(FileType, objectPath, Some(scalaRange), Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .getObject( + FileType, + objectPath, + Option(versionId.orElse(null)), + StorageHeaders + .create() + .withRangeHeader(range.asInstanceOf[ScalaByteRange]) + .withLeaseIdHeader(Option(leaseId.orElse(null))) + .headers + ) .toCompletionStage() ) - } /** * Gets file representing `objectPath` with specified range (if applicable). @@ -62,7 +70,15 @@ object FileService { leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = { new Source( AzureStorageStream - .getObject(FileType, objectPath, None, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .getObject( + FileType, + objectPath, + Option(versionId.orElse(null)), + StorageHeaders + .create() + .withLeaseIdHeader(Option(leaseId.orElse(null))) + .headers + ) .toCompletionStage() ) } @@ -80,7 +96,10 @@ object FileService { versionId: Optional[String], leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .getObjectProperties(FileType, objectPath, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .getObjectProperties(FileType, + objectPath, + Option(versionId.orElse(null)), + StorageHeaders.create().withLeaseIdHeader(Option(leaseId.orElse(null))).headers) .map(opt => Optional.ofNullable(opt.orNull)) .asJava @@ -97,7 +116,10 @@ object FileService { versionId: Optional[String], leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .deleteObject(FileType, objectPath, Option(versionId.orElse(null)), Option(leaseId.orElse(null))) + .deleteObject(FileType, + objectPath, + Option(versionId.orElse(null)), + StorageHeaders.create().withLeaseIdHeader(Option(leaseId.orElse(null))).headers) .map(opt => Optional.ofNullable(opt.orNull)) .asJava @@ -116,7 +138,16 @@ object FileService { maxSize: Long, leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .createFile(objectPath, contentType.asInstanceOf[ScalaContentType], maxSize, Option(leaseId.orElse(null))) + .createFile( + objectPath, + StorageHeaders + .create() + .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) + .withFileMaxContentLengthHeader(maxSize) + .withFileTypeHeader() + .withLeaseIdHeader(Option(leaseId.orElse(null))) + .headers + ) .map(opt => Optional.ofNullable(opt.orNull)) .asJava @@ -135,15 +166,24 @@ object FileService { contentType: ContentType, range: Slice, payload: Source[ByteString, _], - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = { + val contentLength = range.last - range.first + 1 AzureStorageStream - .updateOrClearRange(objectPath, - contentType.asInstanceOf[ScalaContentType], - range, - Some(payload.asScala), - Option(leaseId.orElse(null))) + .updateRange( + objectPath, + HttpEntity(contentType.asInstanceOf[ScalaContentType], contentLength, payload.asScala), + StorageHeaders + .create() + .withContentLengthHeader(contentLength) + .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) + .withRangeHeader(range) + .withLeaseIdHeader(Option(leaseId.orElse(null))) + .withFileWriteTypeHeader(FileWriteTypeHeader.UpdateFileHeader) + .headers + ) .map(opt => Optional.ofNullable(opt.orNull)) .asJava + } /** * Clears specified range from the file. @@ -158,11 +198,16 @@ object FileService { range: Slice, leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .updateOrClearRange(objectPath, - ContentTypes.NO_CONTENT_TYPE.asInstanceOf[ScalaContentType], - range, - None, - Option(leaseId.orElse(null))) + .clearRange( + objectPath, + StorageHeaders + .create() + .withContentLengthHeader(0L) + .withRangeHeader(range) + .withLeaseIdHeader(Option(leaseId.orElse(null))) + .withFileWriteTypeHeader(FileWriteTypeHeader.ClearFileHeader) + .headers + ) .map(opt => Optional.ofNullable(opt.orNull)) .asJava } diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala index 9a0c77fe79..07bb35f680 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala @@ -8,8 +8,9 @@ package storage package scaladsl import akka.NotUsed -import akka.http.scaladsl.model.{ContentType, ContentTypes} +import akka.http.scaladsl.model.{ContentType, ContentTypes, HttpEntity} import akka.http.scaladsl.model.headers.ByteRange +import akka.stream.alpakka.azure.storage.headers.BlobTypeHeader import akka.stream.alpakka.azure.storage.impl.AzureStorageStream import akka.stream.scaladsl.Source import akka.util.ByteString @@ -35,7 +36,10 @@ object BlobService { range: Option[ByteRange] = None, versionId: Option[String] = None, leaseId: Option[String] = None): Source[ByteString, Future[ObjectMetadata]] = - AzureStorageStream.getObject(BlobType, objectPath, range, versionId, leaseId) + AzureStorageStream.getObject(BlobType, + objectPath, + versionId, + StorageHeaders().withRangeHeader(range).withLeaseIdHeader(leaseId).headers) /** * Gets blob properties. @@ -49,7 +53,10 @@ object BlobService { def getProperties(objectPath: String, versionId: Option[String] = None, leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.getObjectProperties(BlobType, objectPath, versionId, leaseId) + AzureStorageStream.getObjectProperties(BlobType, + objectPath, + versionId, + StorageHeaders().withLeaseIdHeader(leaseId).headers) /** * Deletes blob. @@ -63,7 +70,10 @@ object BlobService { def deleteBlob(objectPath: String, versionId: Option[String] = None, leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.deleteObject(BlobType, objectPath, versionId, leaseId) + AzureStorageStream.deleteObject(BlobType, + objectPath, + versionId, + StorageHeaders().withLeaseIdHeader(leaseId).headers) /** * Put blob. @@ -72,7 +82,6 @@ object BlobService { * @param contentType content type of the blob * @param contentLength length of the blob * @param payload actual payload, a [[akka.stream.scaladsl.Source Source]] of [[akka.util.ByteString ByteString]] - * @param blobType type of the blob, ''Must be one of:'' __'''BlockBlob, PageBlob, or AppendBlob'''__ * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ @@ -80,9 +89,17 @@ object BlobService { contentType: ContentType = ContentTypes.`application/octet-stream`, contentLength: Long, payload: Source[ByteString, _], - blobType: String = "BlockBlob", leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.putBlob(blobType, objectPath, contentType, contentLength, payload, leaseId) + AzureStorageStream.putBlob( + objectPath, + HttpEntity(contentType, contentLength, payload), + StorageHeaders() + .withContentLengthHeader(contentLength) + .withContentTypeHeader(contentType) + .withBlobTypeHeader(BlobTypeHeader.BlockBlobHeader) + .withLeaseIdHeader(leaseId) + .headers + ) /** * Create container. diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala index 673dc3edd5..a6d0a4fb6c 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala @@ -8,8 +8,9 @@ package storage package scaladsl import akka.NotUsed -import akka.http.scaladsl.model.{ContentType, ContentTypes} +import akka.http.scaladsl.model.{ContentType, ContentTypes, HttpEntity} import akka.http.scaladsl.model.headers.ByteRange +import akka.stream.alpakka.azure.storage.headers.FileWriteTypeHeader import akka.stream.alpakka.azure.storage.impl.AzureStorageStream import akka.stream.scaladsl.Source import akka.util.ByteString @@ -35,7 +36,10 @@ object FileService { range: Option[ByteRange] = None, versionId: Option[String] = None, leaseId: Option[String] = None): Source[ByteString, Future[ObjectMetadata]] = - AzureStorageStream.getObject(FileType, objectPath, range, versionId, leaseId) + AzureStorageStream.getObject(FileType, + objectPath, + versionId, + StorageHeaders().withRangeHeader(range).withLeaseIdHeader(leaseId).headers) /** * Gets file properties. @@ -49,7 +53,10 @@ object FileService { def getProperties(objectPath: String, versionId: Option[String] = None, leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.getObjectProperties(FileType, objectPath, versionId, leaseId) + AzureStorageStream.getObjectProperties(FileType, + objectPath, + versionId, + StorageHeaders().withLeaseIdHeader(leaseId).headers) /** * Deletes file. @@ -63,7 +70,10 @@ object FileService { def deleteFile(objectPath: String, versionId: Option[String] = None, leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.deleteObject(FileType, objectPath, versionId, leaseId) + AzureStorageStream.deleteObject(FileType, + objectPath, + versionId, + StorageHeaders().withLeaseIdHeader(leaseId).headers) /** * Creates a file. @@ -79,7 +89,15 @@ object FileService { contentType: ContentType = ContentTypes.`application/octet-stream`, maxSize: Long, leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.createFile(objectPath, contentType, maxSize, leaseId) + AzureStorageStream.createFile( + objectPath, + StorageHeaders() + .withContentTypeHeader(contentType) + .withFileMaxContentLengthHeader(maxSize) + .withFileTypeHeader() + .withLeaseIdHeader(leaseId) + .headers + ) /** * Updates file on the specified range. @@ -96,8 +114,20 @@ object FileService { contentType: ContentType = ContentTypes.`application/octet-stream`, range: ByteRange.Slice, payload: Source[ByteString, _], - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.updateOrClearRange(objectPath, contentType, range, Some(payload), leaseId) + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = { + val contentLength = range.last - range.first + 1 + AzureStorageStream.updateRange( + objectPath, + HttpEntity(contentType, contentLength, payload), + StorageHeaders() + .withContentLengthHeader(contentLength) + .withContentTypeHeader(contentType) + .withRangeHeader(range) + .withLeaseIdHeader(leaseId) + .withFileWriteTypeHeader(FileWriteTypeHeader.UpdateFileHeader) + .headers + ) + } /** * Clears specified range from the file. @@ -111,5 +141,13 @@ object FileService { def clearRange(objectPath: String, range: ByteRange.Slice, leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.updateOrClearRange(objectPath, ContentTypes.NoContentType, range, None, leaseId) + AzureStorageStream.clearRange( + objectPath, + StorageHeaders() + .withContentLengthHeader(0L) + .withRangeHeader(range) + .withLeaseIdHeader(leaseId) + .withFileWriteTypeHeader(FileWriteTypeHeader.ClearFileHeader) + .headers + ) } diff --git a/azure-storage/src/test/java/docs/javadsl/StorageTest.java b/azure-storage/src/test/java/docs/javadsl/StorageTest.java index 50e056a17f..9092d369ff 100644 --- a/azure-storage/src/test/java/docs/javadsl/StorageTest.java +++ b/azure-storage/src/test/java/docs/javadsl/StorageTest.java @@ -85,7 +85,7 @@ public void putBlob() throws Exception { ContentTypes.TEXT_PLAIN_UTF8, contentLength(), Source.single(ByteString.fromString(payload())), - "BlockBlob", Optional.empty()); + Optional.empty()); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); //#put-blob From c10b3d11026e1a46a0409b74effb067f5d1e8534 Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 19:18:13 -0400 Subject: [PATCH 47/78] rename "putBlob" to "putBlockBlob" #3253 --- .../azure/storage/impl/AzureStorageStream.scala | 6 +++--- .../azure/storage/javadsl/BlobService.scala | 14 +++++++------- .../azure/storage/scaladsl/BlobService.scala | 14 +++++++------- .../src/test/java/docs/javadsl/StorageTest.java | 6 +++--- .../storage/scaladsl/StorageIntegrationSpec.scala | 2 +- .../src/test/scala/docs/scaladsl/StorageSpec.scala | 6 +++--- docs/src/main/paradox/azure-storage.md | 8 ++++---- 7 files changed, 28 insertions(+), 28 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index 900f433b7f..7ba504dca5 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -139,9 +139,9 @@ object AzureStorageStream { .mapMaterializedValue(_ => NotUsed) } - private[storage] def putBlob(objectPath: String, - httpEntity: UniversalEntity, - headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { + private[storage] def putBlockBlob(objectPath: String, + httpEntity: UniversalEntity, + headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala index 0a195aa4bc..6b7441a52a 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala @@ -122,7 +122,7 @@ object BlobService { .asJava /** - * Put blob. + * Put Block blob. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` * @param contentType content type of the blob @@ -131,13 +131,13 @@ object BlobService { * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def putBlob(objectPath: String, - contentType: ContentType, - contentLength: Long, - payload: Source[ByteString, _], - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + def putBlockBlob(objectPath: String, + contentType: ContentType, + contentLength: Long, + payload: Source[ByteString, _], + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .putBlob( + .putBlockBlob( objectPath, HttpEntity(contentType.asInstanceOf[ScalaContentType], contentLength, payload.asScala), StorageHeaders diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala index 07bb35f680..8c7143780e 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala @@ -76,7 +76,7 @@ object BlobService { StorageHeaders().withLeaseIdHeader(leaseId).headers) /** - * Put blob. + * Put Block blob. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` * @param contentType content type of the blob @@ -85,12 +85,12 @@ object BlobService { * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def putBlob(objectPath: String, - contentType: ContentType = ContentTypes.`application/octet-stream`, - contentLength: Long, - payload: Source[ByteString, _], - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.putBlob( + def putBlockBlob(objectPath: String, + contentType: ContentType = ContentTypes.`application/octet-stream`, + contentLength: Long, + payload: Source[ByteString, _], + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.putBlockBlob( objectPath, HttpEntity(contentType, contentLength, payload), StorageHeaders() diff --git a/azure-storage/src/test/java/docs/javadsl/StorageTest.java b/azure-storage/src/test/java/docs/javadsl/StorageTest.java index 9092d369ff..75840f6db2 100644 --- a/azure-storage/src/test/java/docs/javadsl/StorageTest.java +++ b/azure-storage/src/test/java/docs/javadsl/StorageTest.java @@ -79,16 +79,16 @@ public void createContainer() throws Exception { public void putBlob() throws Exception { mockPutBlob(); - //#put-blob + //#put-block-blob final Source, NotUsed> source = - BlobService.putBlob(containerName() + "/" + blobName(), + BlobService.putBlockBlob(containerName() + "/" + blobName(), ContentTypes.TEXT_PLAIN_UTF8, contentLength(), Source.single(ByteString.fromString(payload())), Optional.empty()); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); - //#put-blob + //#put-block-blob final var optionalObjectMetadata = optionalCompletionStage.toCompletableFuture().get(); Assert.assertTrue(optionalObjectMetadata.isPresent()); diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala index 60ad5eb72a..3ea7ab5d58 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala @@ -58,7 +58,7 @@ trait StorageIntegrationSpec "put blob" in { val maybeObjectMetadata = BlobService - .putBlob( + .putBlockBlob( objectPath = objectPath, contentType = ContentTypes.`text/plain(UTF-8)`, contentLength = contentLength, diff --git a/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala b/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala index b79a166e4d..665ef23e46 100644 --- a/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala +++ b/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala @@ -60,12 +60,12 @@ class StorageSpec "put blob" ignore { mockPutBlob() - //#put-blob + //#put-block-blob import akka.stream.alpakka.azure.storage.scaladsl.BlobService import akka.stream.alpakka.azure.storage.ObjectMetadata val source: Source[Option[ObjectMetadata], NotUsed] = - BlobService.putBlob( + BlobService.putBlockBlob( objectPath = s"$containerName/$blobName", contentType = ContentTypes.`text/plain(UTF-8)`, contentLength = contentLength, @@ -73,7 +73,7 @@ class StorageSpec ) val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) - //#put-blob + //#put-block-blob val maybeObjectMetadata = eventualMaybeMetadata.futureValue maybeObjectMetadata shouldBe defined diff --git a/docs/src/main/paradox/azure-storage.md b/docs/src/main/paradox/azure-storage.md index b197499408..1927be1892 100644 --- a/docs/src/main/paradox/azure-storage.md +++ b/docs/src/main/paradox/azure-storage.md @@ -68,15 +68,15 @@ Scala Java : @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #create-container } -### Put Blob +### Put Block Blob -The [`Put Blob`](https://learn.microsoft.com/en-us/rest/api/storageservices/put-blob) operation creates a new block, page, or append blob, or updates the content of an existing block blob. +The [`Put Block Blob`](https://learn.microsoft.com/en-us/rest/api/storageservices/put-blob) operation creates a new block or updates the content of an existing block blob. Scala -: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #put-blob } +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #put-block-blob } Java -: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #put-blob } +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #put-block-blob } ### Get Blob From 96601a846dc9b532aa6cdf78145e77d335b5a5b0 Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 19:43:03 -0400 Subject: [PATCH 48/78] functions to put (create) page and append block #3253 --- .../azure/storage/StorageHeaders.scala | 7 ++- .../storage/impl/AzureStorageStream.scala | 19 ++++++ .../azure/storage/javadsl/BlobService.scala | 58 +++++++++++++++++++ .../azure/storage/scaladsl/BlobService.scala | 51 ++++++++++++++++ 4 files changed, 133 insertions(+), 2 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala index fbe16c4083..c80de1bf3a 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala @@ -65,8 +65,11 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H private[storage] def withPageBlobContentLengthHeader(contentLength: Long): StorageHeaders = copy(pageBlobContentLengthHeader = Some(RawHeader(PageBlobContentLengthHeaderKey, contentLength.toString))) - private[storage] def withPageBlobSequenceNumberHeader(sequenceNumber: Int): StorageHeaders = - copy(pageBlobSequenceNumberHeader = Some(RawHeader(PageBlobSequenceNumberHeaderKey, sequenceNumber.toString))) + private[storage] def withPageBlobSequenceNumberHeader(sequenceNumber: Option[Int]): StorageHeaders = + copy( + pageBlobSequenceNumberHeader = + sequenceNumber.map(value => RawHeader(PageBlobSequenceNumberHeaderKey, value.toString)) + ) private def copy(contentLengthHeader: Option[HttpHeader] = contentLengthHeader, contentTypeHeader: Option[HttpHeader] = contentTypeHeader, diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index 7ba504dca5..166073fd91 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -160,6 +160,25 @@ object AzureStorageStream { .mapMaterializedValue(_ => NotUsed) } + private[storage] def putPageOrAppendBlock(objectPath: String, headers: Seq[HttpHeader]) = { + Source + .fromMaterializer { (mat, attr) => + implicit val system: ActorSystem = mat.system + val settings = resolveSettings(attr, system) + val request = + createRequest( + method = HttpMethods.PUT, + uri = createUri(settings = settings, + storageType = BlobType, + objectPath = objectPath, + queryString = createQueryString(settings)), + headers = headers + ) + handlePutRequest(request, settings) + } + .mapMaterializedValue(_ => NotUsed) + } + private[storage] def createFile(objectPath: String, headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { Source diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala index 6b7441a52a..7a56df646d 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala @@ -151,6 +151,64 @@ object BlobService { .map(opt => Optional.ofNullable(opt.orNull)) .asJava + /** + * Put (Create) Page Blob. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param contentType content type of the blob + * @param maxBlockSize maximum block size + * @param blobSequenceNumber optional block sequence number + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def putPageBlock(objectPath: String, + contentType: ContentType, + maxBlockSize: Long, + blobSequenceNumber: Option[Int], + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed.type] = + AzureStorageStream + .putPageOrAppendBlock( + objectPath, + StorageHeaders + .create() + .withContentLengthHeader(0L) + .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) + .withBlobTypeHeader(BlobTypeHeader.PageBlobHeader) + .withPageBlobContentLengthHeader(maxBlockSize) + .withPageBlobSequenceNumberHeader(blobSequenceNumber) + .withLeaseIdHeader(Option(leaseId.orElse(null))) + .headers + ) + .map(opt => Optional.ofNullable(opt.orNull)) + .asJava + + /** + * Put (Create) Append Blob. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param contentType content type of the blob + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def putAppendBlock(objectPath: String, + contentType: ContentType, + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed.type] = + AzureStorageStream + .putPageOrAppendBlock( + objectPath, + StorageHeaders + .create() + .withContentLengthHeader(0L) + .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) + .withBlobTypeHeader(BlobTypeHeader.AppendBlobHeader) + .withLeaseIdHeader(Option(leaseId.orElse(null))) + .headers + ) + .map(opt => Optional.ofNullable(opt.orNull)) + .asJava + /** * Create container. * diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala index 8c7143780e..1cda6a1795 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala @@ -82,6 +82,7 @@ object BlobService { * @param contentType content type of the blob * @param contentLength length of the blob * @param payload actual payload, a [[akka.stream.scaladsl.Source Source]] of [[akka.util.ByteString ByteString]] + * @param leaseId lease ID of an active lease (if applicable) * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ @@ -101,6 +102,56 @@ object BlobService { .headers ) + /** + * Put (Create) Page Blob. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param contentType content type of the blob + * @param maxBlockSize maximum block size + * @param blobSequenceNumber optional block sequence number + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def putPageBlock(objectPath: String, + contentType: ContentType = ContentTypes.`application/octet-stream`, + maxBlockSize: Long, + blobSequenceNumber: Option[Int] = None, + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.putPageOrAppendBlock( + objectPath, + StorageHeaders() + .withContentLengthHeader(0L) + .withContentTypeHeader(contentType) + .withBlobTypeHeader(BlobTypeHeader.PageBlobHeader) + .withPageBlobContentLengthHeader(maxBlockSize) + .withPageBlobSequenceNumberHeader(blobSequenceNumber) + .withLeaseIdHeader(leaseId) + .headers + ) + + /** + * Put (Create) Append Blob. + * + * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param contentType content type of the blob + * @param leaseId lease ID of an active lease (if applicable) + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def putAppendBlock(objectPath: String, + contentType: ContentType = ContentTypes.`application/octet-stream`, + leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.putPageOrAppendBlock( + objectPath, + StorageHeaders() + .withContentLengthHeader(0L) + .withContentTypeHeader(contentType) + .withBlobTypeHeader(BlobTypeHeader.AppendBlobHeader) + .withLeaseIdHeader(leaseId) + .headers + ) + /** * Create container. * From a4e06871aa75de9549aebaaad1d1242d73671a33 Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 21:04:13 -0400 Subject: [PATCH 49/78] unit tests for put (create) page and append block #3253 --- .../azure/storage/scaladsl/BlobService.scala | 2 - .../test/java/docs/javadsl/StorageTest.java | 41 ++++++++++++- .../scaladsl/StorageWireMockBase.scala | 31 +++++++++- .../scala/docs/scaladsl/StorageSpec.scala | 58 ++++++++++++++++++- 4 files changed, 125 insertions(+), 7 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala index 1cda6a1795..998c0e2314 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala @@ -121,7 +121,6 @@ object BlobService { AzureStorageStream.putPageOrAppendBlock( objectPath, StorageHeaders() - .withContentLengthHeader(0L) .withContentTypeHeader(contentType) .withBlobTypeHeader(BlobTypeHeader.PageBlobHeader) .withPageBlobContentLengthHeader(maxBlockSize) @@ -145,7 +144,6 @@ object BlobService { AzureStorageStream.putPageOrAppendBlock( objectPath, StorageHeaders() - .withContentLengthHeader(0L) .withContentTypeHeader(contentType) .withBlobTypeHeader(BlobTypeHeader.AppendBlobHeader) .withLeaseIdHeader(leaseId) diff --git a/azure-storage/src/test/java/docs/javadsl/StorageTest.java b/azure-storage/src/test/java/docs/javadsl/StorageTest.java index 75840f6db2..5b4adf7e48 100644 --- a/azure-storage/src/test/java/docs/javadsl/StorageTest.java +++ b/azure-storage/src/test/java/docs/javadsl/StorageTest.java @@ -76,8 +76,8 @@ public void createContainer() throws Exception { // header, secondly once that resolve then we get `akka.http.scaladsl.model.EntityStreamException`. @Ignore("Fix this test case") @Test - public void putBlob() throws Exception { - mockPutBlob(); + public void putBlockBlob() throws Exception { + mockPutBlockBlob(); //#put-block-blob final Source, NotUsed> source = @@ -94,6 +94,43 @@ public void putBlob() throws Exception { Assert.assertTrue(optionalObjectMetadata.isPresent()); } + @Test + public void putPageBlob() throws Exception { + mockPutPageBlob(); + + //#put-page-blob + final Source, NotUsed> source = + BlobService.putPageBlock(containerName() + "/" + blobName(), + ContentTypes.TEXT_PLAIN_UTF8, + 512L, + Optional.of(0), + Optional.empty()); + + final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); + //#put-page-blob + + final var optionalObjectMetadata = optionalCompletionStage.toCompletableFuture().get(); + Assert.assertTrue(optionalObjectMetadata.isPresent()); + } + + @Test + public void putAppendBlob() throws Exception { + mockPutAppendBlob(); + + //#put-append-blob + final Source, NotUsed> source = + BlobService.putAppendBlock(containerName() + "/" + blobName(), + ContentTypes.TEXT_PLAIN_UTF8, + Optional.empty()); + + final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); + //#put-append-blob + + final var optionalObjectMetadata = optionalCompletionStage.toCompletableFuture().get(); + Assert.assertTrue(optionalObjectMetadata.isPresent()); + } + + @Test public void getBlob() throws Exception { mockGetBlob(); diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala index d1cdc7bf50..3eee9abbf3 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala @@ -54,9 +54,10 @@ abstract class StorageWireMockBase(_system: ActorSystem, val _wireMockServer: Wi ) ) - protected def mockPutBlob(): StubMapping = + protected def mockPutBlockBlob(): StubMapping = mock.register( put(urlEqualTo(s"/$AccountName/$containerName/$blobName")) + .withHeader(BlobTypeHeaderKey, equalTo(BlockBlobType)) .withRequestBody(equalTo(payload)) .willReturn( aResponse() @@ -67,6 +68,34 @@ abstract class StorageWireMockBase(_system: ActorSystem, val _wireMockServer: Wi ) ) + protected def mockPutPageBlob(): StubMapping = + mock.register( + put(urlEqualTo(s"/$AccountName/$containerName/$blobName")) + .withHeader(BlobTypeHeaderKey, equalTo(PageBlobType)) + .withHeader(PageBlobContentLengthHeaderKey, equalTo("512")) + .withHeader(PageBlobSequenceNumberHeaderKey, equalTo("0")) + .willReturn( + aResponse() + .withStatus(201) + .withHeader(ETag.name, ETagValue) + .withHeader(`Content-Length`.name, "0") + .withHeader(`Content-Type`.name, "text/plain; charset=UTF-8") + ) + ) + + protected def mockPutAppendBlob(): StubMapping = + mock.register( + put(urlEqualTo(s"/$AccountName/$containerName/$blobName")) + .withHeader(BlobTypeHeaderKey, equalTo(AppendBlobType)) + .willReturn( + aResponse() + .withStatus(201) + .withHeader(ETag.name, ETagValue) + .withHeader(`Content-Length`.name, "0") + .withHeader(`Content-Type`.name, "text/plain; charset=UTF-8") + ) + ) + protected def mockGetBlob(): StubMapping = mock.register( get(urlEqualTo(s"/$AccountName/$containerName/$blobName")) diff --git a/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala b/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala index 665ef23e46..ffb5dfa3ba 100644 --- a/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala +++ b/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala @@ -57,8 +57,8 @@ class StorageSpec // TODO: There are couple of issues, firstly there are two `Content-Length` headers being added, one by `putBlob` // function and secondly by, most likely, by WireMock. Need to to figure out how to tell WireMock not to add `Content-Length` // header, secondly once that resolve then we get `akka.http.scaladsl.model.EntityStreamException`. - "put blob" ignore { - mockPutBlob() + "put block blob" ignore { + mockPutBlockBlob() //#put-block-blob import akka.stream.alpakka.azure.storage.scaladsl.BlobService @@ -82,6 +82,60 @@ class StorageSpec objectMetadata.eTag shouldBe Some(ETagRawValue) } + // TODO: There are couple of issues, firstly there are two `Content-Length` headers being added, one by `putBlob` + // function and secondly by, most likely, by WireMock. Need to to figure out how to tell WireMock not to add `Content-Length` + // header, secondly once that resolve then we get `akka.http.scaladsl.model.EntityStreamException`. + "put page blob" in { + mockPutPageBlob() + + //#put-page-blob + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + BlobService.putPageBlock( + objectPath = s"$containerName/$blobName", + contentType = ContentTypes.`text/plain(UTF-8)`, + maxBlockSize = 512L, + blobSequenceNumber = Some(0) + ) + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) + //#put-page-blob + + val maybeObjectMetadata = eventualMaybeMetadata.futureValue + maybeObjectMetadata shouldBe defined + val objectMetadata = maybeObjectMetadata.get + objectMetadata.contentLength shouldBe 0L + objectMetadata.eTag shouldBe Some(ETagRawValue) + } + + // TODO: There are couple of issues, firstly there are two `Content-Length` headers being added, one by `putBlob` + // function and secondly by, most likely, by WireMock. Need to to figure out how to tell WireMock not to add `Content-Length` + // header, secondly once that resolve then we get `akka.http.scaladsl.model.EntityStreamException`. + "put append blob" in { + mockPutAppendBlob() + + //#put-append-blob + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[Option[ObjectMetadata], NotUsed] = + BlobService.putAppendBlock( + objectPath = s"$containerName/$blobName", + contentType = ContentTypes.`text/plain(UTF-8)` + ) + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) + //#put-append-blob + + val maybeObjectMetadata = eventualMaybeMetadata.futureValue + maybeObjectMetadata shouldBe defined + val objectMetadata = maybeObjectMetadata.get + objectMetadata.contentLength shouldBe 0L + objectMetadata.eTag shouldBe Some(ETagRawValue) + } + "get blob" in { mockGetBlob() From dc22e087d2e4744a8190a6a9820dd70b67112d27 Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 21:23:22 -0400 Subject: [PATCH 50/78] fix return types #3253 --- .../azure/storage/impl/AzureStorageStream.scala | 3 ++- .../alpakka/azure/storage/javadsl/BlobService.scala | 12 ++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index 166073fd91..7d26366ab0 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -160,7 +160,8 @@ object AzureStorageStream { .mapMaterializedValue(_ => NotUsed) } - private[storage] def putPageOrAppendBlock(objectPath: String, headers: Seq[HttpHeader]) = { + private[storage] def putPageOrAppendBlock(objectPath: String, + headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala index 7a56df646d..dece70e947 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala @@ -165,18 +165,19 @@ object BlobService { def putPageBlock(objectPath: String, contentType: ContentType, maxBlockSize: Long, - blobSequenceNumber: Option[Int], - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed.type] = + blobSequenceNumber: Optional[Int], + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream .putPageOrAppendBlock( objectPath, StorageHeaders .create() - .withContentLengthHeader(0L) .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) .withBlobTypeHeader(BlobTypeHeader.PageBlobHeader) .withPageBlobContentLengthHeader(maxBlockSize) - .withPageBlobSequenceNumberHeader(blobSequenceNumber) + .withPageBlobSequenceNumberHeader( + Option(blobSequenceNumber.map(i => Integer.getInteger(i.toString)).orElse(null)) + ) .withLeaseIdHeader(Option(leaseId.orElse(null))) .headers ) @@ -194,13 +195,12 @@ object BlobService { */ def putAppendBlock(objectPath: String, contentType: ContentType, - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed.type] = + leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream .putPageOrAppendBlock( objectPath, StorageHeaders .create() - .withContentLengthHeader(0L) .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) .withBlobTypeHeader(BlobTypeHeader.AppendBlobHeader) .withLeaseIdHeader(Option(leaseId.orElse(null))) From 562950e61b699064c4f9a1d13a3de6133f2519b5 Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 21:34:57 -0400 Subject: [PATCH 51/78] rename in order to accommodate page blob header #3253 --- .../stream/alpakka/azure/storage/StorageHeaders.scala | 4 ++-- .../stream/alpakka/azure/storage/headers/headers.scala | 10 +++++----- .../alpakka/azure/storage/javadsl/FileService.scala | 6 +++--- .../alpakka/azure/storage/scaladsl/FileService.scala | 6 +++--- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala index c80de1bf3a..1be2054589 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala @@ -8,7 +8,7 @@ package storage import akka.http.scaladsl.model.headers.{ByteRange, RawHeader, Range => RangeHeader} import akka.http.scaladsl.model.{ContentType, HttpHeader} -import headers.{BlobTypeHeader, CustomContentLengthHeader, CustomContentTypeHeader, FileWriteTypeHeader} +import headers.{BlobTypeHeader, CustomContentLengthHeader, CustomContentTypeHeader, RangeWriteTypeHeader} import java.util.Objects @@ -53,7 +53,7 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H private[storage] def withLeaseIdHeader(leaseId: Option[String]): StorageHeaders = copy(leaseIdHeader = leaseId.map(value => RawHeader(LeaseIdHeaderKey, value))) - private[storage] def withFileWriteTypeHeader(fileWriteTypeHeader: FileWriteTypeHeader): StorageHeaders = + private[storage] def withFileWriteTypeHeader(fileWriteTypeHeader: RangeWriteTypeHeader): StorageHeaders = copy(fileWriteTypeHeader = Some(fileWriteTypeHeader.header)) private[storage] def withFileTypeHeader(): StorageHeaders = diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/headers.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/headers.scala index bfb1e09db9..1a199fd396 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/headers.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/headers.scala @@ -41,11 +41,11 @@ object BlobTypeHeader { private[storage] val AppendBlobHeader = new BlobTypeHeader(AppendBlobType) } -private[storage] case class FileWriteTypeHeader(writeType: String) { - @InternalApi private[storage] def header: HttpHeader = RawHeader(FileWriteTypeHeaderKey, writeType) +private[storage] case class RangeWriteTypeHeader(headerName: String, writeType: String) { + @InternalApi private[storage] def header: HttpHeader = RawHeader(headerName, writeType) } -object FileWriteTypeHeader { - private[storage] val UpdateFileHeader = new FileWriteTypeHeader("update") - private[storage] val ClearFileHeader = new FileWriteTypeHeader("clear") +object RangeWriteTypeHeader { + private[storage] val UpdateFileHeader = new RangeWriteTypeHeader(FileWriteTypeHeaderKey, "update") + private[storage] val ClearFileHeader = new RangeWriteTypeHeader(FileWriteTypeHeaderKey, "clear") } diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala index 1300f1a75c..ea65cf70b7 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala @@ -13,7 +13,7 @@ import akka.http.javadsl.model.headers.ByteRange import akka.http.scaladsl.model.headers.ByteRange.Slice import akka.http.scaladsl.model.headers.{ByteRange => ScalaByteRange} import akka.http.scaladsl.model.{HttpEntity, ContentType => ScalaContentType} -import akka.stream.alpakka.azure.storage.headers.FileWriteTypeHeader +import akka.stream.alpakka.azure.storage.headers.RangeWriteTypeHeader import akka.stream.alpakka.azure.storage.impl.AzureStorageStream import akka.stream.javadsl.Source import akka.stream.scaladsl.SourceToCompletionStage @@ -178,7 +178,7 @@ object FileService { .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) .withRangeHeader(range) .withLeaseIdHeader(Option(leaseId.orElse(null))) - .withFileWriteTypeHeader(FileWriteTypeHeader.UpdateFileHeader) + .withFileWriteTypeHeader(RangeWriteTypeHeader.UpdateFileHeader) .headers ) .map(opt => Optional.ofNullable(opt.orNull)) @@ -205,7 +205,7 @@ object FileService { .withContentLengthHeader(0L) .withRangeHeader(range) .withLeaseIdHeader(Option(leaseId.orElse(null))) - .withFileWriteTypeHeader(FileWriteTypeHeader.ClearFileHeader) + .withFileWriteTypeHeader(RangeWriteTypeHeader.ClearFileHeader) .headers ) .map(opt => Optional.ofNullable(opt.orNull)) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala index a6d0a4fb6c..d42a85ea1f 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala @@ -10,7 +10,7 @@ package scaladsl import akka.NotUsed import akka.http.scaladsl.model.{ContentType, ContentTypes, HttpEntity} import akka.http.scaladsl.model.headers.ByteRange -import akka.stream.alpakka.azure.storage.headers.FileWriteTypeHeader +import akka.stream.alpakka.azure.storage.headers.RangeWriteTypeHeader import akka.stream.alpakka.azure.storage.impl.AzureStorageStream import akka.stream.scaladsl.Source import akka.util.ByteString @@ -124,7 +124,7 @@ object FileService { .withContentTypeHeader(contentType) .withRangeHeader(range) .withLeaseIdHeader(leaseId) - .withFileWriteTypeHeader(FileWriteTypeHeader.UpdateFileHeader) + .withFileWriteTypeHeader(RangeWriteTypeHeader.UpdateFileHeader) .headers ) } @@ -147,7 +147,7 @@ object FileService { .withContentLengthHeader(0L) .withRangeHeader(range) .withLeaseIdHeader(leaseId) - .withFileWriteTypeHeader(FileWriteTypeHeader.ClearFileHeader) + .withFileWriteTypeHeader(RangeWriteTypeHeader.ClearFileHeader) .headers ) } From e90a7170f3f21fd6cc12b3aa2dd91d7c2145c42c Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 28 Aug 2024 21:39:33 -0400 Subject: [PATCH 52/78] fix compilation #3253 --- .../akka/stream/alpakka/azure/storage/javadsl/BlobService.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala index dece70e947..b5b86ad9fa 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala @@ -176,7 +176,7 @@ object BlobService { .withBlobTypeHeader(BlobTypeHeader.PageBlobHeader) .withPageBlobContentLengthHeader(maxBlockSize) .withPageBlobSequenceNumberHeader( - Option(blobSequenceNumber.map(i => Integer.getInteger(i.toString)).orElse(null)) + Option(blobSequenceNumber.orElse(null.asInstanceOf[Integer])) ) .withLeaseIdHeader(Option(leaseId.orElse(null))) .headers From 70466ce109b6eb670e392356a5193f2089973929 Mon Sep 17 00:00:00 2001 From: sfali Date: Thu, 29 Aug 2024 06:10:59 -0400 Subject: [PATCH 53/78] Headers for page blob update #3253 1. Rename file write header 2. Added header for page blob for future implementation --- .../azure/storage/StorageHeaders.scala | 20 +++++++++---------- .../azure/storage/headers/headers.scala | 2 ++ .../alpakka/azure/storage/package.scala | 1 + 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala index 1be2054589..d9e2a745fc 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala @@ -18,7 +18,7 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H val blobTypeHeader: Option[HttpHeader] = None, val leaseIdHeader: Option[HttpHeader] = None, val fileWriteTypeHeader: Option[HttpHeader] = None, - val fileTypeHeader: Option[HttpHeader] = None, + val rangeWriteTypeHeader: Option[HttpHeader] = None, val fileMaxContentLengthHeader: Option[HttpHeader] = None, val pageBlobContentLengthHeader: Option[HttpHeader] = None, val pageBlobSequenceNumberHeader: Option[HttpHeader] = None) { @@ -30,7 +30,7 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H blobTypeHeader ++ leaseIdHeader ++ fileWriteTypeHeader ++ - fileTypeHeader ++ + rangeWriteTypeHeader ++ fileMaxContentLengthHeader ++ pageBlobContentLengthHeader ++ pageBlobSequenceNumberHeader).toSeq @@ -53,8 +53,8 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H private[storage] def withLeaseIdHeader(leaseId: Option[String]): StorageHeaders = copy(leaseIdHeader = leaseId.map(value => RawHeader(LeaseIdHeaderKey, value))) - private[storage] def withFileWriteTypeHeader(fileWriteTypeHeader: RangeWriteTypeHeader): StorageHeaders = - copy(fileWriteTypeHeader = Some(fileWriteTypeHeader.header)) + private[storage] def withRangeWriteTypeHeader(fileWriteTypeHeader: RangeWriteTypeHeader): StorageHeaders = + copy(rangeWriteTypeHeader = Some(fileWriteTypeHeader.header)) private[storage] def withFileTypeHeader(): StorageHeaders = copy(fileTypeHeader = Some(RawHeader(FileTypeHeaderKey, "file"))) @@ -76,8 +76,8 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H rangeHeader: Option[HttpHeader] = rangeHeader, blobTypeHeader: Option[HttpHeader] = blobTypeHeader, leaseIdHeader: Option[HttpHeader] = leaseIdHeader, - fileWriteTypeHeader: Option[HttpHeader] = fileWriteTypeHeader, - fileTypeHeader: Option[HttpHeader] = fileTypeHeader, + rangeWriteTypeHeader: Option[HttpHeader] = fileWriteTypeHeader, + fileTypeHeader: Option[HttpHeader] = rangeWriteTypeHeader, fileMaxContentLengthHeader: Option[HttpHeader] = fileMaxContentLengthHeader, pageBlobContentLengthHeader: Option[HttpHeader] = pageBlobContentLengthHeader, pageBlobSequenceNumberHeader: Option[HttpHeader] = pageBlobSequenceNumberHeader) = @@ -87,8 +87,8 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H rangeHeader = rangeHeader, blobTypeHeader = blobTypeHeader, leaseIdHeader = leaseIdHeader, - fileWriteTypeHeader = fileWriteTypeHeader, - fileTypeHeader = fileTypeHeader, + fileWriteTypeHeader = rangeWriteTypeHeader, + rangeWriteTypeHeader = fileTypeHeader, fileMaxContentLengthHeader = fileMaxContentLengthHeader, pageBlobContentLengthHeader = pageBlobContentLengthHeader, pageBlobSequenceNumberHeader = pageBlobSequenceNumberHeader @@ -102,7 +102,7 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H | blobTypeHeader=${blobTypeHeader.map(_.value()).getOrElse("None")}, | leaseIdHeader=${leaseIdHeader.map(_.value()).getOrElse("None")}, | fileWriteTypeHeader=${fileWriteTypeHeader.map(_.value()).getOrElse("None")}, - | fileTypeHeader=${fileTypeHeader.map(_.value()).getOrElse("None")}, + | fileTypeHeader=${rangeWriteTypeHeader.map(_.value()).getOrElse("None")}, | fileMaxContentLengthHeader=${fileMaxContentLengthHeader.map(_.value()).getOrElse("None")}, | pageBlobContentLengthHeader=${pageBlobContentLengthHeader.map(_.value()).getOrElse("None")}, | pageBlobSequenceNumberHeader=${pageBlobSequenceNumberHeader.map(_.value()).getOrElse("None")} @@ -131,7 +131,7 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H blobTypeHeader, leaseIdHeader, fileWriteTypeHeader, - fileTypeHeader, + rangeWriteTypeHeader, fileMaxContentLengthHeader, pageBlobContentLengthHeader, pageBlobSequenceNumberHeader diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/headers.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/headers.scala index 1a199fd396..23a0f9fbef 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/headers.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/headers.scala @@ -48,4 +48,6 @@ private[storage] case class RangeWriteTypeHeader(headerName: String, writeType: object RangeWriteTypeHeader { private[storage] val UpdateFileHeader = new RangeWriteTypeHeader(FileWriteTypeHeaderKey, "update") private[storage] val ClearFileHeader = new RangeWriteTypeHeader(FileWriteTypeHeaderKey, "clear") + private[storage] val UpdatePageHeader = new RangeWriteTypeHeader(PageWriteTypeHeaderKey, "update") + private[storage] val ClearPageHeader = new RangeWriteTypeHeader(PageWriteTypeHeaderKey, "clear") } diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala index 0595bacf3a..a1f7c2088a 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala @@ -19,6 +19,7 @@ package object storage { private[storage] val BlobTypeHeaderKey = "x-ms-blob-type" private[storage] val LeaseIdHeaderKey = "x-ms-lease-id" private[storage] val FileWriteTypeHeaderKey = "x-ms-write" + private[storage] val PageWriteTypeHeaderKey = "x-ms-page-write" private[storage] val XMsContentLengthHeaderKey = "x-ms-content-length" private[storage] val FileTypeHeaderKey = "x-ms-type" private[storage] val PageBlobContentLengthHeaderKey = "x-ms-blob-content-length" From f167907cd0b230010560be1167c72bff7773870c Mon Sep 17 00:00:00 2001 From: sfali Date: Thu, 29 Aug 2024 06:11:17 -0400 Subject: [PATCH 54/78] updated rename field #3253 --- .../stream/alpakka/azure/storage/javadsl/FileService.scala | 4 ++-- .../stream/alpakka/azure/storage/scaladsl/FileService.scala | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala index ea65cf70b7..f948e3ace3 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala @@ -178,7 +178,7 @@ object FileService { .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) .withRangeHeader(range) .withLeaseIdHeader(Option(leaseId.orElse(null))) - .withFileWriteTypeHeader(RangeWriteTypeHeader.UpdateFileHeader) + .withRangeWriteTypeHeader(RangeWriteTypeHeader.UpdateFileHeader) .headers ) .map(opt => Optional.ofNullable(opt.orNull)) @@ -205,7 +205,7 @@ object FileService { .withContentLengthHeader(0L) .withRangeHeader(range) .withLeaseIdHeader(Option(leaseId.orElse(null))) - .withFileWriteTypeHeader(RangeWriteTypeHeader.ClearFileHeader) + .withRangeWriteTypeHeader(RangeWriteTypeHeader.ClearFileHeader) .headers ) .map(opt => Optional.ofNullable(opt.orNull)) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala index d42a85ea1f..c114ed345a 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala @@ -124,7 +124,7 @@ object FileService { .withContentTypeHeader(contentType) .withRangeHeader(range) .withLeaseIdHeader(leaseId) - .withFileWriteTypeHeader(RangeWriteTypeHeader.UpdateFileHeader) + .withRangeWriteTypeHeader(RangeWriteTypeHeader.UpdateFileHeader) .headers ) } @@ -147,7 +147,7 @@ object FileService { .withContentLengthHeader(0L) .withRangeHeader(range) .withLeaseIdHeader(leaseId) - .withFileWriteTypeHeader(RangeWriteTypeHeader.ClearFileHeader) + .withRangeWriteTypeHeader(RangeWriteTypeHeader.ClearFileHeader) .headers ) } From c750818aff907cdc4d31cce039253ba6feb533de Mon Sep 17 00:00:00 2001 From: sfali Date: Thu, 29 Aug 2024 06:49:18 -0400 Subject: [PATCH 55/78] extracted common function and delegate all functions to this common function #3253 --- .../storage/impl/AzureStorageStream.scala | 146 +++++------------- .../azure/storage/javadsl/BlobService.scala | 10 +- .../azure/storage/scaladsl/BlobService.scala | 10 +- 3 files changed, 53 insertions(+), 113 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index 7d26366ab0..a83699ffff 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -15,6 +15,7 @@ import akka.http.scaladsl.model.StatusCodes.{Accepted, Created, NotFound, OK} import akka.http.scaladsl.model.headers.{`Content-Length`, `Content-Type`, CustomHeader} import akka.http.scaladsl.model.{ ContentType, + HttpEntity, HttpHeader, HttpMethod, HttpMethods, @@ -40,7 +41,7 @@ object AzureStorageStream { private[storage] def getObject(storageType: String, objectPath: String, versionId: Option[String], - headers: Seq[HttpHeader]): Source[ByteString, Future[ObjectMetadata]] = { + headers: Seq[HttpHeader]): Source[ByteString, Future[ObjectMetadata]] = Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system @@ -71,12 +72,11 @@ object AzureStorageStream { .mapMaterializedValue(_ => objectMetadataMat.future) } .mapMaterializedValue(_.flatMap(identity)(ExecutionContexts.parasitic)) - } private[storage] def getObjectProperties(storageType: String, objectPath: String, versionId: Option[String], - headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { + headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system @@ -104,12 +104,11 @@ object AzureStorageStream { } } .mapMaterializedValue(_ => NotUsed) - } private[storage] def deleteObject(storageType: String, objectPath: String, versionId: Option[String], - headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { + headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system @@ -137,128 +136,65 @@ object AzureStorageStream { } } .mapMaterializedValue(_ => NotUsed) - } - private[storage] def putBlockBlob(objectPath: String, - httpEntity: UniversalEntity, - headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { - Source - .fromMaterializer { (mat, attr) => - implicit val system: ActorSystem = mat.system - val settings = resolveSettings(attr, system) - val request = - createRequest( - method = HttpMethods.PUT, - uri = createUri(settings = settings, - storageType = BlobType, - objectPath = objectPath, - queryString = createQueryString(settings)), - headers = headers - ).withEntity(httpEntity) - handlePutRequest(request, settings) - } - .mapMaterializedValue(_ => NotUsed) - } - - private[storage] def putPageOrAppendBlock(objectPath: String, - headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { - Source - .fromMaterializer { (mat, attr) => - implicit val system: ActorSystem = mat.system - val settings = resolveSettings(attr, system) - val request = - createRequest( - method = HttpMethods.PUT, - uri = createUri(settings = settings, - storageType = BlobType, - objectPath = objectPath, - queryString = createQueryString(settings)), - headers = headers - ) - handlePutRequest(request, settings) - } - .mapMaterializedValue(_ => NotUsed) - } + private[storage] def putBlob(objectPath: String, + maybeHttpEntity: Option[UniversalEntity], + headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = + putRequest(objectPath = objectPath, storageType = BlobType, maybeHttpEntity = maybeHttpEntity, headers = headers) private[storage] def createFile(objectPath: String, - headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { - Source - .fromMaterializer { (mat, attr) => - implicit val system: ActorSystem = mat.system - val settings = resolveSettings(attr, system) - val request = - createRequest( - method = HttpMethods.PUT, - uri = createUri(settings = settings, - storageType = FileType, - objectPath = objectPath, - queryString = createQueryString(settings)), - headers = headers - ) - handlePutRequest(request, settings) - } - .mapMaterializedValue(_ => NotUsed) - } + headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = + putRequest(objectPath = objectPath, storageType = FileType, headers = headers) private[storage] def updateRange(objectPath: String, httpEntity: UniversalEntity, - headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { - Source - .fromMaterializer { (mat, attr) => - implicit val system: ActorSystem = mat.system - val settings = resolveSettings(attr, system) - val request = - createRequest( - method = HttpMethods.PUT, - uri = createUri(settings = settings, - storageType = FileType, - objectPath = objectPath, - queryString = createQueryString(settings, Some("comp=range"))), - headers = headers - ).withEntity(httpEntity) - handlePutRequest(request, settings) - } - .mapMaterializedValue(_ => NotUsed) - } + headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = + putRequest(objectPath = objectPath, + storageType = FileType, + maybeHttpEntity = Some(httpEntity), + queryString = Some("comp=range"), + headers = headers) private[storage] def clearRange(objectPath: String, - headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = { - Source - .fromMaterializer { (mat, attr) => - implicit val system: ActorSystem = mat.system - val settings = resolveSettings(attr, system) - val request = - createRequest( - method = HttpMethods.PUT, - uri = createUri(settings = settings, - storageType = FileType, - objectPath = objectPath, - queryString = createQueryString(settings, Some("comp=range"))), - headers = headers - ) - handlePutRequest(request, settings) - } - .mapMaterializedValue(_ => NotUsed) - } + headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = + putRequest(objectPath = objectPath, storageType = FileType, queryString = Some("comp=range"), headers = headers) private[storage] def createContainer(objectPath: String): Source[Option[ObjectMetadata], NotUsed] = + putRequest(objectPath = objectPath, storageType = BlobType, queryString = Some("restype=container")) + + /** + * Common function for "PUT" request where we don't expect response body. + * + * @param objectPath path of the object. + * @param storageType storage type + * @param maybeHttpEntity optional http entity + * @param queryString query string + * @param headers request headers + * @return Source with metadata containing response headers + */ + private def putRequest(objectPath: String, + storageType: String, + maybeHttpEntity: Option[UniversalEntity] = None, + queryString: Option[String] = None, + headers: Seq[HttpHeader] = Seq.empty): Source[Option[ObjectMetadata], NotUsed] = { Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system val settings = resolveSettings(attr, system) + val httpEntity = maybeHttpEntity.getOrElse(HttpEntity.Empty) val request = createRequest( method = HttpMethods.PUT, uri = createUri(settings = settings, - storageType = BlobType, + storageType = storageType, objectPath = objectPath, - queryString = createQueryString(settings, Some("restype=container"))), - headers = StorageHeaders().headers - ) - + queryString = createQueryString(settings, queryString)), + headers = headers + ).withEntity(httpEntity) handlePutRequest(request, settings) } .mapMaterializedValue(_ => NotUsed) + } private def handlePutRequest(request: HttpRequest, settings: StorageSettings)(implicit system: ActorSystem) = { import system.dispatcher diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala index b5b86ad9fa..b7b162a932 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala @@ -137,9 +137,9 @@ object BlobService { payload: Source[ByteString, _], leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .putBlockBlob( + .putBlob( objectPath, - HttpEntity(contentType.asInstanceOf[ScalaContentType], contentLength, payload.asScala), + Some(HttpEntity(contentType.asInstanceOf[ScalaContentType], contentLength, payload.asScala)), StorageHeaders .create() .withContentLengthHeader(contentLength) @@ -168,8 +168,9 @@ object BlobService { blobSequenceNumber: Optional[Int], leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .putPageOrAppendBlock( + .putBlob( objectPath, + None, StorageHeaders .create() .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) @@ -197,8 +198,9 @@ object BlobService { contentType: ContentType, leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .putPageOrAppendBlock( + .putBlob( objectPath, + None, StorageHeaders .create() .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala index 998c0e2314..afdfb8827d 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala @@ -91,9 +91,9 @@ object BlobService { contentLength: Long, payload: Source[ByteString, _], leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.putBlockBlob( + AzureStorageStream.putBlob( objectPath, - HttpEntity(contentType, contentLength, payload), + Some(HttpEntity(contentType, contentLength, payload)), StorageHeaders() .withContentLengthHeader(contentLength) .withContentTypeHeader(contentType) @@ -118,8 +118,9 @@ object BlobService { maxBlockSize: Long, blobSequenceNumber: Option[Int] = None, leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.putPageOrAppendBlock( + AzureStorageStream.putBlob( objectPath, + None, StorageHeaders() .withContentTypeHeader(contentType) .withBlobTypeHeader(BlobTypeHeader.PageBlobHeader) @@ -141,8 +142,9 @@ object BlobService { def putAppendBlock(objectPath: String, contentType: ContentType = ContentTypes.`application/octet-stream`, leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.putPageOrAppendBlock( + AzureStorageStream.putBlob( objectPath, + None, StorageHeaders() .withContentTypeHeader(contentType) .withBlobTypeHeader(BlobTypeHeader.AppendBlobHeader) From c8afd96610886b5dd604492675f8f3c0d0fd0fb9 Mon Sep 17 00:00:00 2001 From: sfali Date: Thu, 29 Aug 2024 06:54:37 -0400 Subject: [PATCH 56/78] clean up, headers move to separate class #3253 --- .../storage/impl/AzureStorageStream.scala | 26 ++----------------- 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index a83699ffff..7a37111c5b 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -12,9 +12,8 @@ import akka.actor.ActorSystem import akka.dispatch.ExecutionContexts import akka.http.scaladsl.Http import akka.http.scaladsl.model.StatusCodes.{Accepted, Created, NotFound, OK} -import akka.http.scaladsl.model.headers.{`Content-Length`, `Content-Type`, CustomHeader} +import akka.http.scaladsl.model.headers.{`Content-Length`, `Content-Type`} import akka.http.scaladsl.model.{ - ContentType, HttpEntity, HttpHeader, HttpMethod, @@ -27,6 +26,7 @@ import akka.http.scaladsl.model.{ Uri } import akka.http.scaladsl.unmarshalling.Unmarshal +import akka.stream.alpakka.azure.storage.headers.CustomContentTypeHeader import akka.stream.{Attributes, Materializer} import akka.stream.alpakka.azure.storage.impl.auth.Signer import akka.stream.scaladsl.{Flow, RetryFlow, Source} @@ -336,26 +336,4 @@ object AzureStorageStream { .map(settingsPath => storageExtension.settings(settingsPath.path)) .getOrElse(storageExtension.settings) } - - // `Content-Type` header is by design not accessible as header. So need to have a custom - // header implementation to expose that - private case class CustomContentTypeHeader(contentType: ContentType) extends CustomHeader { - override def name(): String = "Content-Type" - - override def value(): String = contentType.value - - override def renderInRequests(): Boolean = true - - override def renderInResponses(): Boolean = true - } - - private case class CustomContentLengthHeader(contentLength: Long) extends CustomHeader { - override def name(): String = "Content-Length" - - override def value(): String = contentLength.toString - - override def renderInRequests(): Boolean = true - - override def renderInResponses(): Boolean = true - } } From ba27834d6b0eb0a55979ed7252eccd6714bfcabd Mon Sep 17 00:00:00 2001 From: sfali Date: Thu, 29 Aug 2024 06:55:33 -0400 Subject: [PATCH 57/78] fix compiler warning #3253 --- .../stream/alpakka/azure/storage/impl/AzureStorageStream.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index 7a37111c5b..80a748ae57 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -315,7 +315,7 @@ object AzureStorageStream { ) } - private def createQueryString(settings: StorageSettings, apiQueryString: Option[String] = None) = { + private def createQueryString(settings: StorageSettings, apiQueryString: Option[String]) = { if (settings.authorizationType == SasAuthorizationType) { if (settings.sasToken.isEmpty) throw new RuntimeException("SAS token must be defined for SAS authorization type.") else { From 31bdf558711db70fe5694ddde20a6799d88669ae Mon Sep 17 00:00:00 2001 From: sfali Date: Thu, 29 Aug 2024 07:10:50 -0400 Subject: [PATCH 58/78] using ScalaIgnore #3253 --- .../azure/storage/scaladsl/AzuriteIntegrationSpec.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala index 52a8b38370..a8ccad9673 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala @@ -11,12 +11,13 @@ import akka.actor.ActorSystem import akka.stream.Attributes import com.dimafeng.testcontainers.ForAllTestContainer import com.typesafe.config.ConfigFactory -import org.junit.Ignore +import org.scalatest.Ignore import scala.concurrent.duration._ import scala.concurrent.Await // TODO: investigate how Azurite works, it is not even working with pure Java API +// `putBlob` operations fails with "remature end of file." @Ignore class AzuriteIntegrationSpec extends StorageIntegrationSpec with ForAllTestContainer { From d1b5410ec7d4b9586c27874eeeb26b383a785e2b Mon Sep 17 00:00:00 2001 From: sfali Date: Thu, 29 Aug 2024 07:13:12 -0400 Subject: [PATCH 59/78] fix link #3253 --- docs/src/main/paradox/azure-storage.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/main/paradox/azure-storage.md b/docs/src/main/paradox/azure-storage.md index 1927be1892..e77b4dc5df 100644 --- a/docs/src/main/paradox/azure-storage.md +++ b/docs/src/main/paradox/azure-storage.md @@ -168,7 +168,7 @@ Java ### Delete Blob -The [`Delete File`](https://learn.microsoft.com/en-us/rest/api/storageservices/delete-file) operation immediately removes the file from the storage account. +The [`Delete File`](https://learn.microsoft.com/en-us/rest/api/storageservices/delete-file2) operation immediately removes the file from the storage account. Scala : @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #delete-file } From 988288158ec9f50aaa881e0019f26218315aa389 Mon Sep 17 00:00:00 2001 From: sfali Date: Sat, 31 Aug 2024 14:05:45 -0400 Subject: [PATCH 60/78] introduction of request builder #3253 1. support for SSE and additional headers in StorageRequest 2. introducing request builder and builder for each request 3. update Scala and Java API to use request builder 4. update implementation to use request builder 5. update tests 6.tests for request builder 7. update documentation --- .../headers/ServerSideEncryption.scala | 54 ++++++ .../storage/impl/AzureStorageStream.scala | 147 +++++------------ .../storage/{ => impl}/StorageHeaders.scala | 38 ++++- .../azure/storage/javadsl/BlobService.scala | 155 ++++-------------- .../azure/storage/javadsl/FileService.scala | 147 ++++------------- .../storage/requests/ClearFileRange.scala | 59 +++++++ .../storage/requests/CreateContainer.scala | 48 ++++++ .../azure/storage/requests/CreateFile.scala | 63 +++++++ .../azure/storage/requests/DeleteBlob.scala | 59 +++++++ .../azure/storage/requests/DeleteFile.scala | 59 +++++++ .../azure/storage/requests/GetBlob.scala | 74 +++++++++ .../azure/storage/requests/GetFile.scala | 74 +++++++++ .../storage/requests/GetProperties.scala | 59 +++++++ .../storage/requests/PutAppendBlock.scala | 57 +++++++ .../azure/storage/requests/PutBlockBlob.scala | 63 +++++++ .../azure/storage/requests/PutPageBlock.scala | 71 ++++++++ .../storage/requests/RequestBuilder.scala | 70 ++++++++ .../storage/requests/UpdateFileRange.scala | 67 ++++++++ .../azure/storage/scaladsl/BlobService.scala | 121 ++++---------- .../azure/storage/scaladsl/FileService.scala | 107 ++++-------- .../java/docs/javadsl/RequestBuilderTest.java | 83 ++++++++++ .../test/java/docs/javadsl/StorageTest.java | 54 +++--- .../scaladsl/AzureIntegrationTest.scala | 32 ++-- .../scaladsl/StorageIntegrationSpec.scala | 16 +- .../docs/scaladsl/RequestBuilderSpec.scala | 70 ++++++++ .../scala/docs/scaladsl/StorageSpec.scala | 47 +++--- docs/src/main/paradox/azure-storage.md | 59 ++++++- 27 files changed, 1369 insertions(+), 584 deletions(-) create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/ServerSideEncryption.scala rename azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/{ => impl}/StorageHeaders.scala (83%) create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/ClearFileRange.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/CreateContainer.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/CreateFile.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteBlob.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteFile.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/GetBlob.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/GetFile.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/GetProperties.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/PutAppendBlock.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/PutBlockBlob.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/PutPageBlock.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/RequestBuilder.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/UpdateFileRange.scala create mode 100644 azure-storage/src/test/java/docs/javadsl/RequestBuilderTest.java create mode 100644 azure-storage/src/test/scala/docs/scaladsl/RequestBuilderSpec.scala diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/ServerSideEncryption.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/ServerSideEncryption.scala new file mode 100644 index 0000000000..e1e092e18d --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/headers/ServerSideEncryption.scala @@ -0,0 +1,54 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package headers + +import akka.annotation.InternalApi +import akka.http.scaladsl.model.HttpHeader +import akka.http.scaladsl.model.headers.RawHeader + +import java.security.MessageDigest +import java.util.{Base64, Objects} + +sealed abstract class ServerSideEncryption { + @InternalApi private[storage] def headers: Seq[HttpHeader] +} + +object ServerSideEncryption { + def customerKey(key: String, hash: Option[String]): ServerSideEncryption = new CustomerKey(key, hash) + def customerKey(key: String): ServerSideEncryption = customerKey(key, None) +} + +final class CustomerKey private[headers] (val key: String, val hash: Option[String] = None) + extends ServerSideEncryption { + override private[storage] def headers: Seq[HttpHeader] = Seq( + RawHeader("x-ms-encryption-algorithm", "AES256"), + RawHeader("x-ms-encryption-key", key), + RawHeader("x-ms-encryption-key-sha256", hash.getOrElse(createHash)) + ) + + override def equals(obj: Any): Boolean = + obj match { + case other: CustomerKey => key == other.key && hash == other.hash + case _ => false + } + + override def hashCode(): Int = Objects.hash(key, hash) + + override def toString: String = + s"""ServerSideEncryption.CustomerKeys( + |key=$key, + | hash=$hash + |) + |""".stripMargin.replaceAll(System.lineSeparator(), "") + + private def createHash = { + val messageDigest = MessageDigest.getInstance("SHA-256") + val decodedKey = messageDigest.digest(Base64.getDecoder.decode(key)) + Base64.getEncoder.encodeToString(decodedKey) + } +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index 80a748ae57..35bf2272fb 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -16,19 +16,24 @@ import akka.http.scaladsl.model.headers.{`Content-Length`, `Content-Type`} import akka.http.scaladsl.model.{ HttpEntity, HttpHeader, - HttpMethod, - HttpMethods, HttpRequest, HttpResponse, + MessageEntity, ResponseEntity, - StatusCode, - UniversalEntity, - Uri + StatusCode } import akka.http.scaladsl.unmarshalling.Unmarshal import akka.stream.alpakka.azure.storage.headers.CustomContentTypeHeader import akka.stream.{Attributes, Materializer} import akka.stream.alpakka.azure.storage.impl.auth.Signer +import akka.stream.alpakka.azure.storage.requests.{ + ClearFileRange, + CreateContainer, + CreateFile, + GetProperties, + RequestBuilder, + UpdateFileRange +} import akka.stream.scaladsl.{Flow, RetryFlow, Source} import akka.util.ByteString @@ -40,21 +45,12 @@ object AzureStorageStream { private[storage] def getObject(storageType: String, objectPath: String, - versionId: Option[String], - headers: Seq[HttpHeader]): Source[ByteString, Future[ObjectMetadata]] = + requestBuilder: RequestBuilder): Source[ByteString, Future[ObjectMetadata]] = Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system val settings = resolveSettings(attr, system) - val request = - createRequest( - method = HttpMethods.GET, - uri = createUri(settings = settings, - storageType = storageType, - objectPath = objectPath, - queryString = createQueryString(settings, versionId.map(value => s"versionId=$value"))), - headers = headers - ) + val request = requestBuilder.createRequest(settings, storageType, objectPath) val objectMetadataMat = Promise[ObjectMetadata]() signAndRequest(request, settings)(mat.system) .map(response => response.withEntity(response.entity.withoutSizeLimit)) @@ -75,23 +71,13 @@ object AzureStorageStream { private[storage] def getObjectProperties(storageType: String, objectPath: String, - versionId: Option[String], - headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = + requestBuilder: GetProperties): Source[Option[ObjectMetadata], NotUsed] = Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system import mat.executionContext val settings = resolveSettings(attr, system) - val request = - createRequest( - method = HttpMethods.HEAD, - uri = createUri(settings = settings, - storageType = storageType, - objectPath = objectPath, - queryString = createQueryString(settings, versionId.map(value => s"versionId=$value"))), - headers = headers - ) - + val request = requestBuilder.createRequest(settings, storageType, objectPath) signAndRequest(request, settings) .flatMapConcat { case HttpResponse(OK, headers, entity, _) => @@ -107,23 +93,13 @@ object AzureStorageStream { private[storage] def deleteObject(storageType: String, objectPath: String, - versionId: Option[String], - headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = + requestBuilder: RequestBuilder): Source[Option[ObjectMetadata], NotUsed] = Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system import mat.executionContext val settings = resolveSettings(attr, system) - val request = - createRequest( - method = HttpMethods.DELETE, - uri = createUri(settings = settings, - storageType = storageType, - objectPath = objectPath, - queryString = createQueryString(settings, versionId.map(value => s"versionId=$value"))), - headers = headers - ) - + val request = requestBuilder.createRequest(settings, storageType, objectPath) signAndRequest(request, settings) .flatMapConcat { case HttpResponse(Accepted, headers, entity, _) => @@ -138,59 +114,54 @@ object AzureStorageStream { .mapMaterializedValue(_ => NotUsed) private[storage] def putBlob(objectPath: String, - maybeHttpEntity: Option[UniversalEntity], - headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = - putRequest(objectPath = objectPath, storageType = BlobType, maybeHttpEntity = maybeHttpEntity, headers = headers) + requestBuilder: RequestBuilder, + maybeHttpEntity: Option[MessageEntity]): Source[Option[ObjectMetadata], NotUsed] = + putRequest(objectPath = objectPath, + storageType = BlobType, + requestBuilder = requestBuilder, + maybeHttpEntity = maybeHttpEntity) private[storage] def createFile(objectPath: String, - headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = - putRequest(objectPath = objectPath, storageType = FileType, headers = headers) + requestBuilder: CreateFile): Source[Option[ObjectMetadata], NotUsed] = + putRequest(objectPath = objectPath, storageType = FileType, requestBuilder = requestBuilder) private[storage] def updateRange(objectPath: String, - httpEntity: UniversalEntity, - headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = - putRequest(objectPath = objectPath, - storageType = FileType, - maybeHttpEntity = Some(httpEntity), - queryString = Some("comp=range"), - headers = headers) + httpEntity: MessageEntity, + requestBuilder: UpdateFileRange): Source[Option[ObjectMetadata], NotUsed] = + putRequest( + objectPath = objectPath, + storageType = FileType, + requestBuilder = requestBuilder, + maybeHttpEntity = Some(httpEntity) + ) private[storage] def clearRange(objectPath: String, - headers: Seq[HttpHeader]): Source[Option[ObjectMetadata], NotUsed] = - putRequest(objectPath = objectPath, storageType = FileType, queryString = Some("comp=range"), headers = headers) + requestBuilder: ClearFileRange): Source[Option[ObjectMetadata], NotUsed] = + putRequest(objectPath = objectPath, storageType = FileType, requestBuilder = requestBuilder) - private[storage] def createContainer(objectPath: String): Source[Option[ObjectMetadata], NotUsed] = - putRequest(objectPath = objectPath, storageType = BlobType, queryString = Some("restype=container")) + private[storage] def createContainer(objectPath: String, + requestBuilder: CreateContainer): Source[Option[ObjectMetadata], NotUsed] = + putRequest(objectPath = objectPath, storageType = BlobType, requestBuilder = requestBuilder) /** - * Common function for "PUT" request where we don't expect response body. + *Common function for "PUT" request where we don't expect response body. * * @param objectPath path of the object. * @param storageType storage type + * @param requestBuilder request builder * @param maybeHttpEntity optional http entity - * @param queryString query string - * @param headers request headers * @return Source with metadata containing response headers */ private def putRequest(objectPath: String, storageType: String, - maybeHttpEntity: Option[UniversalEntity] = None, - queryString: Option[String] = None, - headers: Seq[HttpHeader] = Seq.empty): Source[Option[ObjectMetadata], NotUsed] = { + requestBuilder: RequestBuilder, + maybeHttpEntity: Option[MessageEntity] = None): Source[Option[ObjectMetadata], NotUsed] = { Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system val settings = resolveSettings(attr, system) val httpEntity = maybeHttpEntity.getOrElse(HttpEntity.Empty) - val request = - createRequest( - method = HttpMethods.PUT, - uri = createUri(settings = settings, - storageType = storageType, - objectPath = objectPath, - queryString = createQueryString(settings, queryString)), - headers = headers - ).withEntity(httpEntity) + val request = requestBuilder.createRequest(settings, storageType, objectPath).withEntity(httpEntity) handlePutRequest(request, settings) } .mapMaterializedValue(_ => NotUsed) @@ -291,40 +262,6 @@ object AzureStorageStream { } } - private def createRequest(method: HttpMethod, uri: Uri, headers: Seq[HttpHeader]) = - HttpRequest(method = method, uri = uri, headers = headers) - - private def createUri(settings: StorageSettings, - storageType: String, - objectPath: String, - queryString: Option[String]) = { - val accountName = settings.azureNameKeyCredential.accountName - val path = if (objectPath.startsWith("/")) objectPath else s"/$objectPath" - settings.endPointUrl - .map { endPointUrl => - val qs = queryString.getOrElse("") - Uri(endPointUrl).withPath(Uri.Path(s"/$accountName$path")).withQuery(Uri.Query(qs)) - } - .getOrElse( - Uri.from( - scheme = "https", - host = s"$accountName.$storageType.core.windows.net", - path = Uri.Path(path).toString(), - queryString = queryString - ) - ) - } - - private def createQueryString(settings: StorageSettings, apiQueryString: Option[String]) = { - if (settings.authorizationType == SasAuthorizationType) { - if (settings.sasToken.isEmpty) throw new RuntimeException("SAS token must be defined for SAS authorization type.") - else { - val sasToken = settings.sasToken.get - Some(apiQueryString.map(qs => s"$sasToken&$qs").getOrElse(sasToken)) - } - } else apiQueryString - } - private def resolveSettings(attr: Attributes, sys: ActorSystem) = attr .get[StorageSettingsValue] diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/StorageHeaders.scala similarity index 83% rename from azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala rename to azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/StorageHeaders.scala index d9e2a745fc..5b48a5e0b4 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageHeaders.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/StorageHeaders.scala @@ -5,15 +5,17 @@ package akka.stream.alpakka package azure package storage +package impl import akka.http.scaladsl.model.headers.{ByteRange, RawHeader, Range => RangeHeader} import akka.http.scaladsl.model.{ContentType, HttpHeader} -import headers.{BlobTypeHeader, CustomContentLengthHeader, CustomContentTypeHeader, RangeWriteTypeHeader} +import akka.stream.alpakka.azure.storage.headers._ import java.util.Objects private[storage] class StorageHeaders private (val contentLengthHeader: Option[HttpHeader] = None, val contentTypeHeader: Option[HttpHeader] = None, + val sse: Option[ServerSideEncryption] = None, val rangeHeader: Option[HttpHeader] = None, val blobTypeHeader: Option[HttpHeader] = None, val leaseIdHeader: Option[HttpHeader] = None, @@ -21,7 +23,8 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H val rangeWriteTypeHeader: Option[HttpHeader] = None, val fileMaxContentLengthHeader: Option[HttpHeader] = None, val pageBlobContentLengthHeader: Option[HttpHeader] = None, - val pageBlobSequenceNumberHeader: Option[HttpHeader] = None) { + val pageBlobSequenceNumberHeader: Option[HttpHeader] = None, + val additionalHeaders: Seq[HttpHeader] = Seq.empty) { private[storage] def headers: Seq[HttpHeader] = (contentLengthHeader ++ @@ -33,7 +36,7 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H rangeWriteTypeHeader ++ fileMaxContentLengthHeader ++ pageBlobContentLengthHeader ++ - pageBlobSequenceNumberHeader).toSeq + pageBlobSequenceNumberHeader).toSeq ++ sse.map(_.headers).getOrElse(Seq.empty) ++ additionalHeaders private[storage] def withContentLengthHeader(contentLength: Long): StorageHeaders = copy(contentLengthHeader = Some(CustomContentLengthHeader(contentLength))) @@ -41,6 +44,9 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H private[storage] def withContentTypeHeader(contentType: ContentType): StorageHeaders = copy(contentTypeHeader = Some(CustomContentTypeHeader(contentType))) + private[storage] def witServerSideEncryption(sse: Option[ServerSideEncryption]): StorageHeaders = + copy(sse = sse) + private[storage] def withRangeHeader(range: ByteRange): StorageHeaders = copy(rangeHeader = Some(RangeHeader(range))) @@ -71,19 +77,28 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H sequenceNumber.map(value => RawHeader(PageBlobSequenceNumberHeaderKey, value.toString)) ) + private[storage] def withAdditionalHeaders(additionalHeaders: Seq[HttpHeader]) = + copy(additionalHeaders = additionalHeaders) + + private[storage] def addHeader(httpHeader: HttpHeader): StorageHeaders = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + private def copy(contentLengthHeader: Option[HttpHeader] = contentLengthHeader, contentTypeHeader: Option[HttpHeader] = contentTypeHeader, rangeHeader: Option[HttpHeader] = rangeHeader, + sse: Option[ServerSideEncryption] = sse, blobTypeHeader: Option[HttpHeader] = blobTypeHeader, leaseIdHeader: Option[HttpHeader] = leaseIdHeader, rangeWriteTypeHeader: Option[HttpHeader] = fileWriteTypeHeader, fileTypeHeader: Option[HttpHeader] = rangeWriteTypeHeader, fileMaxContentLengthHeader: Option[HttpHeader] = fileMaxContentLengthHeader, pageBlobContentLengthHeader: Option[HttpHeader] = pageBlobContentLengthHeader, - pageBlobSequenceNumberHeader: Option[HttpHeader] = pageBlobSequenceNumberHeader) = + pageBlobSequenceNumberHeader: Option[HttpHeader] = pageBlobSequenceNumberHeader, + additionalHeaders: Seq[HttpHeader] = additionalHeaders) = new StorageHeaders( contentLengthHeader = contentLengthHeader, contentTypeHeader = contentTypeHeader, + sse = sse, rangeHeader = rangeHeader, blobTypeHeader = blobTypeHeader, leaseIdHeader = leaseIdHeader, @@ -91,7 +106,8 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H rangeWriteTypeHeader = fileTypeHeader, fileMaxContentLengthHeader = fileMaxContentLengthHeader, pageBlobContentLengthHeader = pageBlobContentLengthHeader, - pageBlobSequenceNumberHeader = pageBlobSequenceNumberHeader + pageBlobSequenceNumberHeader = pageBlobSequenceNumberHeader, + additionalHeaders = additionalHeaders ) override def toString: String = @@ -99,13 +115,15 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H |contentLengthHeader=${contentLengthHeader.map(_.value()).getOrElse("None")}, | contentTypeHeader=${contentTypeHeader.map(_.value()).getOrElse("None")}, | rangeHeader=${rangeHeader.map(_.value()).getOrElse("None")}, + | sse=${sse.toString}, | blobTypeHeader=${blobTypeHeader.map(_.value()).getOrElse("None")}, | leaseIdHeader=${leaseIdHeader.map(_.value()).getOrElse("None")}, | fileWriteTypeHeader=${fileWriteTypeHeader.map(_.value()).getOrElse("None")}, | fileTypeHeader=${rangeWriteTypeHeader.map(_.value()).getOrElse("None")}, | fileMaxContentLengthHeader=${fileMaxContentLengthHeader.map(_.value()).getOrElse("None")}, | pageBlobContentLengthHeader=${pageBlobContentLengthHeader.map(_.value()).getOrElse("None")}, - | pageBlobSequenceNumberHeader=${pageBlobSequenceNumberHeader.map(_.value()).getOrElse("None")} + | pageBlobSequenceNumberHeader=${pageBlobSequenceNumberHeader.map(_.value()).getOrElse("None")}, + | additionalHeaders=${additionalHeaders.mkString("[", " ,", "]")} |)""".stripMargin.replaceAll(System.lineSeparator(), "") override def equals(obj: Any): Boolean = @@ -113,13 +131,15 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H case other: StorageHeaders => Objects.equals(contentLengthHeader, other.contentLengthHeader) && Objects.equals(contentTypeHeader, other.contentTypeHeader) && + Objects.equals(sse, other.sse) && Objects.equals(rangeHeader, other.rangeHeader) && Objects.equals(blobTypeHeader, other.blobTypeHeader) && Objects.equals(leaseIdHeader, other.leaseIdHeader) && Objects.equals(fileWriteTypeHeader, other.fileWriteTypeHeader) && Objects.equals(fileMaxContentLengthHeader, other.fileMaxContentLengthHeader) && Objects.equals(pageBlobContentLengthHeader, other.pageBlobContentLengthHeader) && - Objects.equals(pageBlobSequenceNumberHeader, other.pageBlobSequenceNumberHeader) + Objects.equals(pageBlobSequenceNumberHeader, other.pageBlobSequenceNumberHeader) && + Objects.equals(additionalHeaders, other.additionalHeaders) case _ => false } @@ -127,6 +147,7 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H Objects.hash( contentLengthHeader, contentTypeHeader, + sse, rangeHeader, blobTypeHeader, leaseIdHeader, @@ -134,7 +155,8 @@ private[storage] class StorageHeaders private (val contentLengthHeader: Option[H rangeWriteTypeHeader, fileMaxContentLengthHeader, pageBlobContentLengthHeader, - pageBlobSequenceNumberHeader + pageBlobSequenceNumberHeader, + additionalHeaders ) } diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala index b7b162a932..7bab23a627 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala @@ -8,12 +8,17 @@ package storage package javadsl import akka.NotUsed -import akka.http.javadsl.model._ -import akka.http.javadsl.model.headers.ByteRange -import akka.http.scaladsl.model.headers.{ByteRange => ScalaByteRange} -import akka.http.scaladsl.model.{HttpEntity, ContentType => ScalaContentType} -import akka.stream.alpakka.azure.storage.headers.BlobTypeHeader +import akka.http.scaladsl.model.HttpEntity import akka.stream.alpakka.azure.storage.impl.AzureStorageStream +import akka.stream.alpakka.azure.storage.requests.{ + CreateContainer, + DeleteFile, + GetBlob, + GetProperties, + PutAppendBlock, + PutBlockBlob, + PutPageBlock +} import akka.stream.javadsl.Source import akka.stream.scaladsl.SourceToCompletionStage import akka.util.ByteString @@ -30,54 +35,14 @@ object BlobService { * Gets blob representing `objectPath` with specified range (if applicable). * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param range range to download - * @param versionId versionId of the blob (if applicable) - * @param leaseId lease ID of an active lease (if applicable) - * @return A [[akka.stream.javadsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a - * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] - */ - def getBlob(objectPath: String, - range: ByteRange, - versionId: Optional[String], - leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = - new Source( - AzureStorageStream - .getObject( - BlobType, - objectPath, - Option(versionId.orElse(null)), - StorageHeaders - .create() - .withRangeHeader(range.asInstanceOf[ScalaByteRange]) - .withLeaseIdHeader(Option(leaseId.orElse(null))) - .headers - ) - .toCompletionStage() - ) - - /** - * Gets blob representing `objectPath` with specified range (if applicable). - * - * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param versionId versionId of the blob (if applicable) - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build getBlob properties request * @return A [[akka.stream.javadsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] */ - def getBlob(objectPath: String, - versionId: Optional[String], - leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = + def getBlob(objectPath: String, requestBuilder: GetBlob): Source[ByteString, CompletionStage[ObjectMetadata]] = new Source( AzureStorageStream - .getObject( - BlobType, - objectPath, - Option(versionId.orElse(null)), - StorageHeaders - .create() - .withLeaseIdHeader(Option(leaseId.orElse(null))) - .headers - ) + .getObject(BlobType, objectPath, requestBuilder) .toCompletionStage() ) @@ -85,19 +50,13 @@ object BlobService { * Gets blob properties. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param versionId versionId of the blob (if applicable) - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build getBlob properties request * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def getProperties(objectPath: String, - versionId: Optional[String], - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + def getProperties(objectPath: String, requestBuilder: GetProperties): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .getObjectProperties(BlobType, - objectPath, - Option(versionId.orElse(null)), - StorageHeaders.create().withLeaseIdHeader(Option(leaseId.orElse(null))).headers) + .getObjectProperties(BlobType, objectPath, requestBuilder) .map(opt => Optional.ofNullable(opt.orNull)) .asJava @@ -105,19 +64,13 @@ object BlobService { * Deletes blob. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param versionId versionId of the blob (if applicable) - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build deleteFile request * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def deleteBlob(objectPath: String, - versionId: Optional[String], - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + def deleteBlob(objectPath: String, requestBuilder: DeleteFile): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .deleteObject(BlobType, - objectPath, - Option(versionId.orElse(null)), - StorageHeaders.create().withLeaseIdHeader(Option(leaseId.orElse(null))).headers) + .deleteObject(BlobType, objectPath, requestBuilder) .map(opt => Optional.ofNullable(opt.orNull)) .asJava @@ -125,28 +78,19 @@ object BlobService { * Put Block blob. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param contentType content type of the blob - * @param contentLength length of the blob + * @param requestBuilder builder to build putBlockBlob request * @param payload actual payload, a [[akka.stream.javadsl.Source Source]] of [[akka.util.ByteString ByteString]] * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ def putBlockBlob(objectPath: String, - contentType: ContentType, - contentLength: Long, - payload: Source[ByteString, _], - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + requestBuilder: PutBlockBlob, + payload: Source[ByteString, _]): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream .putBlob( objectPath, - Some(HttpEntity(contentType.asInstanceOf[ScalaContentType], contentLength, payload.asScala)), - StorageHeaders - .create() - .withContentLengthHeader(contentLength) - .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) - .withLeaseIdHeader(Option(leaseId.orElse(null))) - .withBlobTypeHeader(BlobTypeHeader.BlockBlobHeader) - .headers + requestBuilder, + Some(HttpEntity(requestBuilder.contentType, requestBuilder.contentLength, payload.asScala)) ) .map(opt => Optional.ofNullable(opt.orNull)) .asJava @@ -155,33 +99,13 @@ object BlobService { * Put (Create) Page Blob. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param contentType content type of the blob - * @param maxBlockSize maximum block size - * @param blobSequenceNumber optional block sequence number - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build putPageBlob request * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def putPageBlock(objectPath: String, - contentType: ContentType, - maxBlockSize: Long, - blobSequenceNumber: Optional[Int], - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + def putPageBlock(objectPath: String, requestBuilder: PutPageBlock): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .putBlob( - objectPath, - None, - StorageHeaders - .create() - .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) - .withBlobTypeHeader(BlobTypeHeader.PageBlobHeader) - .withPageBlobContentLengthHeader(maxBlockSize) - .withPageBlobSequenceNumberHeader( - Option(blobSequenceNumber.orElse(null.asInstanceOf[Integer])) - ) - .withLeaseIdHeader(Option(leaseId.orElse(null))) - .headers - ) + .putBlob(objectPath, requestBuilder, None) .map(opt => Optional.ofNullable(opt.orNull)) .asJava @@ -189,35 +113,24 @@ object BlobService { * Put (Create) Append Blob. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param contentType content type of the blob - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build putAppendBlob request * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def putAppendBlock(objectPath: String, - contentType: ContentType, - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + def putAppendBlock(objectPath: String, requestBuilder: PutAppendBlock): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .putBlob( - objectPath, - None, - StorageHeaders - .create() - .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) - .withBlobTypeHeader(BlobTypeHeader.AppendBlobHeader) - .withLeaseIdHeader(Option(leaseId.orElse(null))) - .headers - ) + .putBlob(objectPath, requestBuilder, None) .map(opt => Optional.ofNullable(opt.orNull)) .asJava /** * Create container. * - * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param objectPath name of the container + * @param requestBuilder builder to build createContainer request * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def createContainer(objectPath: String): Source[Optional[ObjectMetadata], NotUsed] = - AzureStorageStream.createContainer(objectPath).map(opt => Optional.ofNullable(opt.orNull)).asJava + def createContainer(objectPath: String, requestBuilder: CreateContainer): Source[Optional[ObjectMetadata], NotUsed] = + AzureStorageStream.createContainer(objectPath, requestBuilder).map(opt => Optional.ofNullable(opt.orNull)).asJava } diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala index f948e3ace3..134afa0345 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala @@ -8,13 +8,16 @@ package storage package javadsl import akka.NotUsed -import akka.http.javadsl.model._ -import akka.http.javadsl.model.headers.ByteRange -import akka.http.scaladsl.model.headers.ByteRange.Slice -import akka.http.scaladsl.model.headers.{ByteRange => ScalaByteRange} -import akka.http.scaladsl.model.{HttpEntity, ContentType => ScalaContentType} -import akka.stream.alpakka.azure.storage.headers.RangeWriteTypeHeader +import akka.http.scaladsl.model.HttpEntity import akka.stream.alpakka.azure.storage.impl.AzureStorageStream +import akka.stream.alpakka.azure.storage.requests.{ + ClearFileRange, + CreateFile, + DeleteFile, + GetFile, + GetProperties, + UpdateFileRange +} import akka.stream.javadsl.Source import akka.stream.scaladsl.SourceToCompletionStage import akka.util.ByteString @@ -31,54 +34,14 @@ object FileService { * Gets file representing `objectPath` with specified range (if applicable). * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` - * @param range range to download - * @param versionId versionId of the file (if applicable) - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build getBlob request * @return A [[akka.stream.javadsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] */ - def getFile(objectPath: String, - range: ByteRange, - versionId: Optional[String], - leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = + def getFile(objectPath: String, requestBuilder: GetFile): Source[ByteString, CompletionStage[ObjectMetadata]] = { new Source( AzureStorageStream - .getObject( - FileType, - objectPath, - Option(versionId.orElse(null)), - StorageHeaders - .create() - .withRangeHeader(range.asInstanceOf[ScalaByteRange]) - .withLeaseIdHeader(Option(leaseId.orElse(null))) - .headers - ) - .toCompletionStage() - ) - - /** - * Gets file representing `objectPath` with specified range (if applicable). - * - * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` - * @param versionId versionId of the file (if applicable) - * @param leaseId lease ID of an active lease (if applicable) - * @return A [[akka.stream.javadsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a - * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] - */ - def getFile(objectPath: String, - versionId: Optional[String], - leaseId: Optional[String]): Source[ByteString, CompletionStage[ObjectMetadata]] = { - new Source( - AzureStorageStream - .getObject( - FileType, - objectPath, - Option(versionId.orElse(null)), - StorageHeaders - .create() - .withLeaseIdHeader(Option(leaseId.orElse(null))) - .headers - ) + .getObject(FileType, objectPath, requestBuilder) .toCompletionStage() ) } @@ -87,19 +50,13 @@ object FileService { * Gets file properties. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` - * @param versionId versionId of the file (if applicable) - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build getFile proroperties request * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def getProperties(objectPath: String, - versionId: Optional[String], - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + def getProperties(objectPath: String, requestBuilder: GetProperties): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .getObjectProperties(FileType, - objectPath, - Option(versionId.orElse(null)), - StorageHeaders.create().withLeaseIdHeader(Option(leaseId.orElse(null))).headers) + .getObjectProperties(FileType, objectPath, requestBuilder) .map(opt => Optional.ofNullable(opt.orNull)) .asJava @@ -107,19 +64,13 @@ object FileService { * Deletes file. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` - * @param versionId versionId of the file (if applicable) - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build getFile proroperties request * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def deleteFile(objectPath: String, - versionId: Optional[String], - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + def deleteFile(objectPath: String, requestBuilder: DeleteFile): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .deleteObject(FileType, - objectPath, - Option(versionId.orElse(null)), - StorageHeaders.create().withLeaseIdHeader(Option(leaseId.orElse(null))).headers) + .deleteObject(FileType, objectPath, requestBuilder) .map(opt => Optional.ofNullable(opt.orNull)) .asJava @@ -127,27 +78,13 @@ object FileService { * Creates a file. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` - * @param contentType content type of the blob - * @param maxSize maximum size of the file - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build createFile request * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def createFile(objectPath: String, - contentType: ContentType, - maxSize: Long, - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + def createFile(objectPath: String, requestBuilder: CreateFile): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .createFile( - objectPath, - StorageHeaders - .create() - .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) - .withFileMaxContentLengthHeader(maxSize) - .withFileTypeHeader() - .withLeaseIdHeader(Option(leaseId.orElse(null))) - .headers - ) + .createFile(objectPath, requestBuilder) .map(opt => Optional.ofNullable(opt.orNull)) .asJava @@ -155,31 +92,21 @@ object FileService { * Updates file on the specified range. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` - * @param contentType content type of the blob - * @param range range of bytes to be written + * @param requestBuilder range of bytes to be written * @param payload actual payload, a [[akka.stream.javadsl.Source Source]] of [[akka.util.ByteString ByteString]] - * @param leaseId lease ID of an active lease (if applicable) * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ def updateRange(objectPath: String, - contentType: ContentType, - range: Slice, - payload: Source[ByteString, _], - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = { - val contentLength = range.last - range.first + 1 + requestBuilder: UpdateFileRange, + payload: Source[ByteString, _]): Source[Optional[ObjectMetadata], NotUsed] = { AzureStorageStream .updateRange( objectPath, - HttpEntity(contentType.asInstanceOf[ScalaContentType], contentLength, payload.asScala), - StorageHeaders - .create() - .withContentLengthHeader(contentLength) - .withContentTypeHeader(contentType.asInstanceOf[ScalaContentType]) - .withRangeHeader(range) - .withLeaseIdHeader(Option(leaseId.orElse(null))) - .withRangeWriteTypeHeader(RangeWriteTypeHeader.UpdateFileHeader) - .headers + HttpEntity(requestBuilder.contentType, + requestBuilder.range.last - requestBuilder.range.first + 1, + payload.asScala), + requestBuilder ) .map(opt => Optional.ofNullable(opt.orNull)) .asJava @@ -189,25 +116,13 @@ object FileService { * Clears specified range from the file. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` - * @param range range of bytes to be cleared - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build clearRange request * @return A [[akka.stream.javadsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def clearRange(objectPath: String, - range: Slice, - leaseId: Optional[String]): Source[Optional[ObjectMetadata], NotUsed] = + def clearRange(objectPath: String, requestBuilder: ClearFileRange): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .clearRange( - objectPath, - StorageHeaders - .create() - .withContentLengthHeader(0L) - .withRangeHeader(range) - .withLeaseIdHeader(Option(leaseId.orElse(null))) - .withRangeWriteTypeHeader(RangeWriteTypeHeader.ClearFileHeader) - .headers - ) + .clearRange(objectPath, requestBuilder) .map(opt => Optional.ofNullable(opt.orNull)) .asJava } diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/ClearFileRange.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/ClearFileRange.scala new file mode 100644 index 0000000000..ac546487f0 --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/ClearFileRange.scala @@ -0,0 +1,59 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.scaladsl.model.headers.ByteRange +import akka.http.scaladsl.model.{HttpHeader, HttpMethod, HttpMethods} +import akka.stream.alpakka.azure.storage.headers.{RangeWriteTypeHeader, ServerSideEncryption} +import akka.stream.alpakka.azure.storage.impl.StorageHeaders + +final class ClearFileRange(val range: ByteRange.Slice, + val leaseId: Option[String] = None, + override val sse: Option[ServerSideEncryption] = None, + override val additionalHeaders: Seq[HttpHeader] = Seq.empty) + extends RequestBuilder { + + override protected val method: HttpMethod = HttpMethods.PUT + + override protected val queryParams: Map[String, String] = super.queryParams ++ Map("comp" -> "range") + + def withLeaseId(leaseId: String): ClearFileRange = copy(leaseId = Option(leaseId)) + + override def withServerSideEncryption(sse: ServerSideEncryption): ClearFileRange = copy(sse = Option(sse)) + + override def addHeader(httpHeader: HttpHeader): ClearFileRange = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + + private def copy(leaseId: Option[String] = leaseId, + sse: Option[ServerSideEncryption] = sse, + additionalHeaders: Seq[HttpHeader] = additionalHeaders) = + new ClearFileRange(range = range, leaseId = leaseId, sse = sse, additionalHeaders = additionalHeaders) + + override protected def getHeaders: Seq[HttpHeader] = + StorageHeaders() + .withContentLengthHeader(0L) + .withRangeHeader(range) + .withLeaseIdHeader(leaseId) + .withRangeWriteTypeHeader(RangeWriteTypeHeader.ClearFileHeader) + .witServerSideEncryption(sse) + .withAdditionalHeaders(additionalHeaders) + .headers +} + +object ClearFileRange { + + /* + * Scala API + */ + def apply(range: ByteRange.Slice): ClearFileRange = new ClearFileRange(range) + + /* + * Java API + */ + def create(range: ByteRange.Slice): ClearFileRange = ClearFileRange(range) +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/CreateContainer.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/CreateContainer.scala new file mode 100644 index 0000000000..c65226079d --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/CreateContainer.scala @@ -0,0 +1,48 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.scaladsl.model.{HttpHeader, HttpMethod, HttpMethods} +import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption +import akka.stream.alpakka.azure.storage.impl.StorageHeaders + +final class CreateContainer(override val sse: Option[ServerSideEncryption] = None, + override val additionalHeaders: Seq[HttpHeader] = Seq.empty) + extends RequestBuilder { + + override protected val method: HttpMethod = HttpMethods.PUT + + override protected val queryParams: Map[String, String] = super.queryParams ++ Map("restype" -> "container") + + override def withServerSideEncryption(sse: ServerSideEncryption): CreateContainer = copy(sse = Option(sse)) + + override def addHeader(httpHeader: HttpHeader): CreateContainer = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + + private def copy(sse: Option[ServerSideEncryption] = sse, additionalHeaders: Seq[HttpHeader] = additionalHeaders) = + new CreateContainer(sse = sse, additionalHeaders = additionalHeaders) + + override protected def getHeaders: Seq[HttpHeader] = + StorageHeaders() + .witServerSideEncryption(sse) + .withAdditionalHeaders(additionalHeaders) + .headers +} + +object CreateContainer { + + /* + * Scala API + */ + def apply(): CreateContainer = new CreateContainer() + + /* + * Java API + */ + def create(): CreateContainer = new CreateContainer() +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/CreateFile.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/CreateFile.scala new file mode 100644 index 0000000000..7940208d2b --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/CreateFile.scala @@ -0,0 +1,63 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.javadsl.model.{ContentType => JavaContentType} +import akka.http.scaladsl.model.{ContentType, HttpHeader, HttpMethod, HttpMethods} +import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption +import akka.stream.alpakka.azure.storage.impl.StorageHeaders + +final class CreateFile(val maxFileSize: Long, + val contentType: ContentType, + val leaseId: Option[String] = None, + override val sse: Option[ServerSideEncryption] = None, + override val additionalHeaders: Seq[HttpHeader] = Seq.empty) + extends RequestBuilder { + + override protected val method: HttpMethod = HttpMethods.PUT + + def withLeaseId(leaseId: String): CreateFile = copy(leaseId = Option(leaseId)) + + override def withServerSideEncryption(sse: ServerSideEncryption): CreateFile = copy(sse = Option(sse)) + + override def addHeader(httpHeader: HttpHeader): CreateFile = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + + override protected def getHeaders: Seq[HttpHeader] = + StorageHeaders() + .withContentTypeHeader(contentType) + .withFileMaxContentLengthHeader(maxFileSize) + .withFileTypeHeader() + .withLeaseIdHeader(leaseId) + .witServerSideEncryption(sse) + .withAdditionalHeaders(additionalHeaders) + .headers + + private def copy(leaseId: Option[String] = leaseId, + sse: Option[ServerSideEncryption] = sse, + additionalHeaders: Seq[HttpHeader] = additionalHeaders) = + new CreateFile(maxFileSize = maxFileSize, + contentType = contentType, + leaseId = leaseId, + sse = sse, + additionalHeaders = additionalHeaders) +} + +object CreateFile { + + /* + * Scala API + */ + def apply(maxFileSize: Long, contentType: ContentType): CreateFile = new CreateFile(maxFileSize, contentType) + + /* + * Java API + */ + def create(maxFileSize: Long, contentType: JavaContentType): CreateFile = + CreateFile(maxFileSize, contentType.asInstanceOf[ContentType]) +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteBlob.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteBlob.scala new file mode 100644 index 0000000000..dd85afb87b --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteBlob.scala @@ -0,0 +1,59 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.scaladsl.model.{HttpHeader, HttpMethod, HttpMethods} +import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption +import akka.stream.alpakka.azure.storage.impl.StorageHeaders + +final class DeleteBlob(val versionId: Option[String] = None, + val leaseId: Option[String] = None, + override val sse: Option[ServerSideEncryption] = None, + override val additionalHeaders: Seq[HttpHeader] = Seq.empty) + extends RequestBuilder { + + override protected val method: HttpMethod = HttpMethods.DELETE + + override protected val queryParams: Map[String, String] = super.queryParams ++ + versionId.map(value => "versionId" -> value).toMap + + def withVersionId(versionId: String): DeleteBlob = copy(versionId = Option(versionId)) + + def withLeaseId(leaseId: String): DeleteBlob = copy(leaseId = Option(leaseId)) + + override def withServerSideEncryption(sse: ServerSideEncryption): DeleteBlob = copy(sse = Option(sse)) + + override def addHeader(httpHeader: HttpHeader): DeleteBlob = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + + override protected def getHeaders: Seq[HttpHeader] = + StorageHeaders() + .witServerSideEncryption(sse) + .withLeaseIdHeader(leaseId) + .withAdditionalHeaders(additionalHeaders) + .headers + + private def copy(versionId: Option[String] = versionId, + leaseId: Option[String] = leaseId, + sse: Option[ServerSideEncryption] = sse, + additionalHeaders: Seq[HttpHeader] = additionalHeaders) = + new DeleteBlob(versionId = versionId, leaseId = leaseId, sse = sse, additionalHeaders = additionalHeaders) +} + +object DeleteBlob { + + /* + * Scala API + */ + def apply(): DeleteBlob = new DeleteBlob() + + /* + * Java API + */ + def create(): DeleteBlob = new DeleteBlob() +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteFile.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteFile.scala new file mode 100644 index 0000000000..d89ffb337b --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteFile.scala @@ -0,0 +1,59 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.scaladsl.model.{HttpHeader, HttpMethod, HttpMethods} +import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption +import akka.stream.alpakka.azure.storage.impl.StorageHeaders + +final class DeleteFile(val versionId: Option[String] = None, + val leaseId: Option[String] = None, + override val sse: Option[ServerSideEncryption] = None, + override val additionalHeaders: Seq[HttpHeader] = Seq.empty) + extends RequestBuilder { + + override protected val method: HttpMethod = HttpMethods.DELETE + + override protected val queryParams: Map[String, String] = super.queryParams ++ + versionId.map(value => "versionId" -> value).toMap + + def withVersionId(versionId: String): DeleteFile = copy(versionId = Option(versionId)) + + def withLeaseId(leaseId: String): DeleteFile = copy(leaseId = Option(leaseId)) + + override def withServerSideEncryption(sse: ServerSideEncryption): DeleteFile = copy(sse = Option(sse)) + + override def addHeader(httpHeader: HttpHeader): DeleteFile = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + + override protected def getHeaders: Seq[HttpHeader] = + StorageHeaders() + .witServerSideEncryption(sse) + .withLeaseIdHeader(leaseId) + .withAdditionalHeaders(additionalHeaders) + .headers + + private def copy(versionId: Option[String] = versionId, + leaseId: Option[String] = leaseId, + sse: Option[ServerSideEncryption] = sse, + additionalHeaders: Seq[HttpHeader] = additionalHeaders) = + new DeleteFile(versionId = versionId, leaseId = leaseId, sse = sse, additionalHeaders = additionalHeaders) +} + +object DeleteFile { + + /* + * Scala API + */ + def apply(): DeleteFile = new DeleteFile() + + /* + * Java API + */ + def create(): DeleteFile = new DeleteFile() +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/GetBlob.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/GetBlob.scala new file mode 100644 index 0000000000..7cb8643779 --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/GetBlob.scala @@ -0,0 +1,74 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.javadsl.model.headers.{ByteRange => JavaByteRange} +import akka.http.scaladsl.model.headers.ByteRange +import akka.http.scaladsl.model.{HttpHeader, HttpMethod, HttpMethods} +import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption +import akka.stream.alpakka.azure.storage.impl.StorageHeaders + +final class GetBlob(val versionId: Option[String] = None, + val range: Option[ByteRange] = None, + val leaseId: Option[String] = None, + override val sse: Option[ServerSideEncryption] = None, + override val additionalHeaders: Seq[HttpHeader] = Seq.empty) + extends RequestBuilder { + + override protected val method: HttpMethod = HttpMethods.GET + + override protected val queryParams: Map[String, String] = super.queryParams ++ + versionId.map(value => "versionId" -> value).toMap + + def withVersionId(versionId: String): GetBlob = copy(versionId = Option(versionId)) + + /** Java API */ + def withRange(range: JavaByteRange): GetBlob = withRange(range.asInstanceOf[ByteRange]) + + /** Scala API */ + def withRange(range: ByteRange): GetBlob = copy(range = Option(range)) + + def withLeaseId(leaseId: String): GetBlob = copy(leaseId = Option(leaseId)) + + override def withServerSideEncryption(sse: ServerSideEncryption): GetBlob = copy(sse = Option(sse)) + + override def addHeader(httpHeader: HttpHeader): GetBlob = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + + private def copy(versionId: Option[String] = versionId, + range: Option[ByteRange] = range, + leaseId: Option[String] = leaseId, + sse: Option[ServerSideEncryption] = sse, + additionalHeaders: Seq[HttpHeader] = additionalHeaders) = + new GetBlob(versionId = versionId, + range = range, + leaseId = leaseId, + sse = sse, + additionalHeaders = additionalHeaders) + + override protected def getHeaders: Seq[HttpHeader] = + StorageHeaders() + .witServerSideEncryption(sse) + .withRangeHeader(range) + .withLeaseIdHeader(leaseId) + .withAdditionalHeaders(additionalHeaders) + .headers +} + +object GetBlob { + + /* + * Scala API + */ + def apply(): GetBlob = new GetBlob() + + /* + * Java API + */ + def create(): GetBlob = new GetBlob() +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/GetFile.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/GetFile.scala new file mode 100644 index 0000000000..16289ef356 --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/GetFile.scala @@ -0,0 +1,74 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.javadsl.model.headers.{ByteRange => JavaByteRange} +import akka.http.scaladsl.model.headers.ByteRange +import akka.http.scaladsl.model.{HttpHeader, HttpMethod, HttpMethods} +import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption +import akka.stream.alpakka.azure.storage.impl.StorageHeaders + +final class GetFile(val versionId: Option[String] = None, + val range: Option[ByteRange] = None, + val leaseId: Option[String] = None, + override val sse: Option[ServerSideEncryption] = None, + override val additionalHeaders: Seq[HttpHeader] = Seq.empty) + extends RequestBuilder { + + override protected val method: HttpMethod = HttpMethods.GET + + override protected val queryParams: Map[String, String] = super.queryParams ++ + versionId.map(value => "versionId" -> value).toMap + + def withVersionId(versionId: String): GetFile = copy(versionId = Option(versionId)) + + /** Java API */ + def withRange(range: JavaByteRange): GetFile = withRange(range.asInstanceOf[ByteRange]) + + /** Scala API */ + def withRange(range: ByteRange): GetFile = copy(range = Option(range)) + + def withLeaseId(leaseId: String): GetFile = copy(leaseId = Option(leaseId)) + + override def withServerSideEncryption(sse: ServerSideEncryption): GetFile = copy(sse = Option(sse)) + + override def addHeader(httpHeader: HttpHeader): GetFile = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + + private def copy(versionId: Option[String] = versionId, + range: Option[ByteRange] = range, + leaseId: Option[String] = leaseId, + sse: Option[ServerSideEncryption] = sse, + additionalHeaders: Seq[HttpHeader] = additionalHeaders) = + new GetFile(versionId = versionId, + range = range, + leaseId = leaseId, + sse = sse, + additionalHeaders = additionalHeaders) + + override protected def getHeaders: Seq[HttpHeader] = + StorageHeaders() + .witServerSideEncryption(sse) + .withRangeHeader(range) + .withLeaseIdHeader(leaseId) + .withAdditionalHeaders(additionalHeaders) + .headers +} + +object GetFile { + + /* + * Scala API + */ + def apply(): GetFile = new GetFile() + + /* + * Java API + */ + def create(): GetFile = new GetFile() +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/GetProperties.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/GetProperties.scala new file mode 100644 index 0000000000..b56d3d6a7d --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/GetProperties.scala @@ -0,0 +1,59 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.scaladsl.model.{HttpHeader, HttpMethod, HttpMethods} +import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption +import akka.stream.alpakka.azure.storage.impl.StorageHeaders + +final class GetProperties(val versionId: Option[String] = None, + val leaseId: Option[String] = None, + override val sse: Option[ServerSideEncryption] = None, + override val additionalHeaders: Seq[HttpHeader] = Seq.empty) + extends RequestBuilder { + + override protected val method: HttpMethod = HttpMethods.HEAD + + override protected val queryParams: Map[String, String] = super.queryParams ++ + versionId.map(value => "versionId" -> value).toMap + + def withVersionId(versionId: String): GetProperties = copy(versionId = Option(versionId)) + + def withLeaseId(leaseId: String): GetProperties = copy(leaseId = Option(leaseId)) + + override def withServerSideEncryption(sse: ServerSideEncryption): GetProperties = copy(sse = Option(sse)) + + override def addHeader(httpHeader: HttpHeader): GetProperties = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + + override protected def getHeaders: Seq[HttpHeader] = + StorageHeaders() + .witServerSideEncryption(sse) + .withLeaseIdHeader(leaseId) + .withAdditionalHeaders(additionalHeaders) + .headers + + private def copy(versionId: Option[String] = versionId, + leaseId: Option[String] = leaseId, + sse: Option[ServerSideEncryption] = sse, + additionalHeaders: Seq[HttpHeader] = additionalHeaders) = + new GetProperties(versionId = versionId, leaseId = leaseId, sse = sse, additionalHeaders = additionalHeaders) +} + +object GetProperties { + + /* + * Scala API + */ + def apply(): GetProperties = new GetProperties() + + /* + * Java API + */ + def create(): GetProperties = new GetProperties() +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/PutAppendBlock.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/PutAppendBlock.scala new file mode 100644 index 0000000000..57a64fda03 --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/PutAppendBlock.scala @@ -0,0 +1,57 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.javadsl.model.{ContentType => JavaContentType} +import akka.http.scaladsl.model.{ContentType, HttpHeader, HttpMethod, HttpMethods} +import akka.stream.alpakka.azure.storage.headers.{BlobTypeHeader, ServerSideEncryption} +import akka.stream.alpakka.azure.storage.impl.StorageHeaders + +final class PutAppendBlock(val contentType: ContentType, + val leaseId: Option[String] = None, + override val sse: Option[ServerSideEncryption] = None, + override val additionalHeaders: Seq[HttpHeader] = Seq.empty) + extends RequestBuilder { + + override protected val method: HttpMethod = HttpMethods.PUT + + def withLeaseId(leaseId: String): PutAppendBlock = copy(leaseId = Option(leaseId)) + + override def withServerSideEncryption(sse: ServerSideEncryption): PutAppendBlock = copy(sse = Option(sse)) + + override def addHeader(httpHeader: HttpHeader): PutAppendBlock = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + + override protected def getHeaders: Seq[HttpHeader] = + StorageHeaders() + .withContentLengthHeader(0L) + .withContentTypeHeader(contentType) + .witServerSideEncryption(sse) + .withBlobTypeHeader(BlobTypeHeader.PageBlobHeader) + .withLeaseIdHeader(leaseId) + .withAdditionalHeaders(additionalHeaders) + .headers + + private def copy(leaseId: Option[String] = leaseId, + sse: Option[ServerSideEncryption] = sse, + additionalHeaders: Seq[HttpHeader] = additionalHeaders) = + new PutAppendBlock(contentType = contentType, leaseId = leaseId, sse = sse, additionalHeaders = additionalHeaders) +} + +object PutAppendBlock { + + /* + * Scala API + */ + def apply(contentType: ContentType): PutAppendBlock = new PutAppendBlock(contentType) + + /* + * Java API + */ + def create(contentType: JavaContentType): PutAppendBlock = PutAppendBlock(contentType.asInstanceOf[ContentType]) +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/PutBlockBlob.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/PutBlockBlob.scala new file mode 100644 index 0000000000..f7647ae46f --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/PutBlockBlob.scala @@ -0,0 +1,63 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.javadsl.model.{ContentType => JavaContentType} +import akka.http.scaladsl.model.{ContentType, HttpHeader, HttpMethod, HttpMethods} +import akka.stream.alpakka.azure.storage.headers.{BlobTypeHeader, ServerSideEncryption} +import akka.stream.alpakka.azure.storage.impl.StorageHeaders + +final class PutBlockBlob(val contentLength: Long, + val contentType: ContentType, + val leaseId: Option[String] = None, + override val sse: Option[ServerSideEncryption] = None, + override val additionalHeaders: Seq[HttpHeader] = Seq.empty) + extends RequestBuilder { + + override protected val method: HttpMethod = HttpMethods.PUT + + def withLeaseId(leaseId: String): PutBlockBlob = copy(leaseId = Option(leaseId)) + + override def withServerSideEncryption(sse: ServerSideEncryption): PutBlockBlob = copy(sse = Option(sse)) + + override def addHeader(httpHeader: HttpHeader): PutBlockBlob = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + + override protected def getHeaders: Seq[HttpHeader] = + StorageHeaders() + .withContentLengthHeader(contentLength) + .withContentTypeHeader(contentType) + .witServerSideEncryption(sse) + .withBlobTypeHeader(BlobTypeHeader.BlockBlobHeader) + .withLeaseIdHeader(leaseId) + .withAdditionalHeaders(additionalHeaders) + .headers + + private def copy(leaseId: Option[String] = leaseId, + sse: Option[ServerSideEncryption] = sse, + additionalHeaders: Seq[HttpHeader] = additionalHeaders) = + new PutBlockBlob(contentLength = contentLength, + contentType = contentType, + leaseId = leaseId, + sse = sse, + additionalHeaders = additionalHeaders) +} + +object PutBlockBlob { + + /* + * Scala API + */ + def apply(contentLength: Long, contentType: ContentType): PutBlockBlob = new PutBlockBlob(contentLength, contentType) + + /* + * Java API + */ + def create(contentLength: Long, contentType: JavaContentType): PutBlockBlob = + PutBlockBlob(contentLength, contentType.asInstanceOf[ContentType]) +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/PutPageBlock.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/PutPageBlock.scala new file mode 100644 index 0000000000..4349fc5f46 --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/PutPageBlock.scala @@ -0,0 +1,71 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.javadsl.model.{ContentType => JavaContentType} +import akka.http.scaladsl.model.{ContentType, HttpHeader, HttpMethod, HttpMethods} +import akka.stream.alpakka.azure.storage.headers.{BlobTypeHeader, ServerSideEncryption} +import akka.stream.alpakka.azure.storage.impl.StorageHeaders + +final class PutPageBlock(val maxBlockSize: Long, + val contentType: ContentType, + val leaseId: Option[String] = None, + val blobSequenceNumber: Option[Int] = None, + override val sse: Option[ServerSideEncryption] = None, + override val additionalHeaders: Seq[HttpHeader] = Seq.empty) + extends RequestBuilder { + + override protected val method: HttpMethod = HttpMethods.PUT + + def withLeaseId(leaseId: String): PutPageBlock = copy(leaseId = Option(leaseId)) + + def withBlobSequenceNumber(blobSequenceNumber: Int): PutPageBlock = + copy(blobSequenceNumber = Option(blobSequenceNumber)) + + override def withServerSideEncryption(sse: ServerSideEncryption): PutPageBlock = copy(sse = Option(sse)) + + override def addHeader(httpHeader: HttpHeader): PutPageBlock = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + + override protected def getHeaders: Seq[HttpHeader] = + StorageHeaders() + .withContentLengthHeader(0L) + .withPageBlobContentLengthHeader(maxBlockSize) + .withContentTypeHeader(contentType) + .witServerSideEncryption(sse) + .withBlobTypeHeader(BlobTypeHeader.PageBlobHeader) + .withLeaseIdHeader(leaseId) + .withPageBlobSequenceNumberHeader(blobSequenceNumber) + .withAdditionalHeaders(additionalHeaders) + .headers + + private def copy(leaseId: Option[String] = leaseId, + blobSequenceNumber: Option[Int] = blobSequenceNumber, + sse: Option[ServerSideEncryption] = sse, + additionalHeaders: Seq[HttpHeader] = additionalHeaders) = + new PutPageBlock(maxBlockSize = maxBlockSize, + contentType = contentType, + leaseId = leaseId, + blobSequenceNumber = blobSequenceNumber, + sse = sse, + additionalHeaders = additionalHeaders) +} + +object PutPageBlock { + + /* + * Scala API + */ + def apply(maxBlockSize: Long, contentType: ContentType): PutPageBlock = new PutPageBlock(maxBlockSize, contentType) + + /* + * Java API + */ + def create(maxBlockSize: Long, contentType: JavaContentType): PutPageBlock = + new PutPageBlock(maxBlockSize, contentType.asInstanceOf[ContentType]) +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/RequestBuilder.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/RequestBuilder.scala new file mode 100644 index 0000000000..8ba317647e --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/RequestBuilder.scala @@ -0,0 +1,70 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.scaladsl.model.Uri.Query +import akka.http.scaladsl.model.headers.RawHeader +import akka.http.scaladsl.model.{HttpHeader, HttpMethod, HttpRequest, Uri} +import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption + +abstract class RequestBuilder(val sse: Option[ServerSideEncryption] = None, + val additionalHeaders: Seq[HttpHeader] = Seq.empty) { + + protected val method: HttpMethod + + protected def queryParams: Map[String, String] = Map.empty + + def withServerSideEncryption(sse: ServerSideEncryption): RequestBuilder + + def addHeader(httpHeader: HttpHeader): RequestBuilder + + def addHeader(name: String, value: String): RequestBuilder = addHeader(RawHeader(name, value)) + + protected def getHeaders: Seq[HttpHeader] + + private[storage] def createRequest(settings: StorageSettings, storageType: String, objectPath: String): HttpRequest = + HttpRequest( + method = method, + uri = createUri(settings = settings, + storageType = storageType, + objectPath = objectPath, + queryString = createQueryString(settings, Some(Query(queryParams).toString()))), + headers = getHeaders + ) + + private def createUri(settings: StorageSettings, + storageType: String, + objectPath: String, + queryString: Option[String]): Uri = { + val accountName = settings.azureNameKeyCredential.accountName + val path = if (objectPath.startsWith("/")) objectPath else s"/$objectPath" + settings.endPointUrl + .map { endPointUrl => + val qs = queryString.getOrElse("") + Uri(endPointUrl).withPath(Uri.Path(s"/$accountName$path")).withQuery(Uri.Query(qs)) + } + .getOrElse( + Uri.from( + scheme = "https", + host = s"$accountName.$storageType.core.windows.net", + path = Uri.Path(path).toString(), + queryString = queryString + ) + ) + } + + private def createQueryString(settings: StorageSettings, apiQueryString: Option[String]): Option[String] = { + if (settings.authorizationType == SasAuthorizationType) { + if (settings.sasToken.isEmpty) throw new RuntimeException("SAS token must be defined for SAS authorization type.") + else { + val sasToken = settings.sasToken.get + Some(apiQueryString.map(qs => s"$sasToken&$qs").getOrElse(sasToken)) + } + } else apiQueryString + } +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/UpdateFileRange.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/UpdateFileRange.scala new file mode 100644 index 0000000000..e71ca27bc0 --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/UpdateFileRange.scala @@ -0,0 +1,67 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.javadsl.model.{ContentType => JavaContentType} +import akka.http.scaladsl.model.headers.ByteRange +import akka.http.scaladsl.model.{ContentType, HttpHeader, HttpMethod, HttpMethods} +import akka.stream.alpakka.azure.storage.headers.{RangeWriteTypeHeader, ServerSideEncryption} +import akka.stream.alpakka.azure.storage.impl.StorageHeaders + +final class UpdateFileRange(val range: ByteRange.Slice, + val contentType: ContentType, + val leaseId: Option[String] = None, + override val sse: Option[ServerSideEncryption] = None, + override val additionalHeaders: Seq[HttpHeader] = Seq.empty) + extends RequestBuilder { + + override protected val method: HttpMethod = HttpMethods.PUT + + override protected val queryParams: Map[String, String] = super.queryParams ++ Map("comp" -> "range") + + def withLeaseId(leaseId: String): UpdateFileRange = copy(leaseId = Option(leaseId)) + + override def withServerSideEncryption(sse: ServerSideEncryption): UpdateFileRange = copy(sse = Option(sse)) + + override def addHeader(httpHeader: HttpHeader): UpdateFileRange = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + + private def copy(leaseId: Option[String] = leaseId, + sse: Option[ServerSideEncryption] = sse, + additionalHeaders: Seq[HttpHeader] = additionalHeaders) = + new UpdateFileRange(contentType = contentType, + range = range, + leaseId = leaseId, + sse = sse, + additionalHeaders = additionalHeaders) + + override protected def getHeaders: Seq[HttpHeader] = + StorageHeaders() + .withContentLengthHeader(range.last - range.first + 1) + .withContentTypeHeader(contentType) + .withRangeHeader(range) + .withLeaseIdHeader(leaseId) + .withRangeWriteTypeHeader(RangeWriteTypeHeader.UpdateFileHeader) + .witServerSideEncryption(sse) + .withAdditionalHeaders(additionalHeaders) + .headers +} + +object UpdateFileRange { + + /* + * Scala API + */ + def apply(range: ByteRange.Slice, contentType: ContentType): UpdateFileRange = new UpdateFileRange(range, contentType) + + /* + * Java API + */ + def create(range: ByteRange.Slice, contentType: JavaContentType): UpdateFileRange = + UpdateFileRange(range, contentType.asInstanceOf[ContentType]) +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala index afdfb8827d..20733642f5 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala @@ -8,10 +8,17 @@ package storage package scaladsl import akka.NotUsed -import akka.http.scaladsl.model.{ContentType, ContentTypes, HttpEntity} -import akka.http.scaladsl.model.headers.ByteRange -import akka.stream.alpakka.azure.storage.headers.BlobTypeHeader +import akka.http.scaladsl.model.HttpEntity import akka.stream.alpakka.azure.storage.impl.AzureStorageStream +import akka.stream.alpakka.azure.storage.requests.{ + CreateContainer, + DeleteBlob, + GetBlob, + GetProperties, + PutAppendBlock, + PutBlockBlob, + PutPageBlock +} import akka.stream.scaladsl.Source import akka.util.ByteString @@ -26,139 +33,81 @@ object BlobService { * Gets blob representing `objectPath` with specified range (if applicable). * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param range range to download - * @param versionId versionId of the blob (if applicable) - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build getBlob request * @return A [[akka.stream.scaladsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] */ - def getBlob(objectPath: String, - range: Option[ByteRange] = None, - versionId: Option[String] = None, - leaseId: Option[String] = None): Source[ByteString, Future[ObjectMetadata]] = - AzureStorageStream.getObject(BlobType, - objectPath, - versionId, - StorageHeaders().withRangeHeader(range).withLeaseIdHeader(leaseId).headers) + def getBlob(objectPath: String, requestBuilder: GetBlob): Source[ByteString, Future[ObjectMetadata]] = + AzureStorageStream.getObject(BlobType, objectPath, requestBuilder) /** * Gets blob properties. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param versionId versionId of the blob (if applicable) - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder versionId of the blob (if applicable) * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def getProperties(objectPath: String, - versionId: Option[String] = None, - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.getObjectProperties(BlobType, - objectPath, - versionId, - StorageHeaders().withLeaseIdHeader(leaseId).headers) + def getProperties(objectPath: String, requestBuilder: GetProperties): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.getObjectProperties(BlobType, objectPath, requestBuilder) /** * Deletes blob. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param versionId versionId of the blob (if applicable) - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build deleteBlob request * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def deleteBlob(objectPath: String, - versionId: Option[String] = None, - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.deleteObject(BlobType, - objectPath, - versionId, - StorageHeaders().withLeaseIdHeader(leaseId).headers) + def deleteBlob(objectPath: String, requestBuilder: DeleteBlob): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.deleteObject(BlobType, objectPath, requestBuilder) /** * Put Block blob. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param contentType content type of the blob - * @param contentLength length of the blob + * @param requestBuilder builder to build putBlockBlob request * @param payload actual payload, a [[akka.stream.scaladsl.Source Source]] of [[akka.util.ByteString ByteString]] - * @param leaseId lease ID of an active lease (if applicable) * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ def putBlockBlob(objectPath: String, - contentType: ContentType = ContentTypes.`application/octet-stream`, - contentLength: Long, - payload: Source[ByteString, _], - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.putBlob( - objectPath, - Some(HttpEntity(contentType, contentLength, payload)), - StorageHeaders() - .withContentLengthHeader(contentLength) - .withContentTypeHeader(contentType) - .withBlobTypeHeader(BlobTypeHeader.BlockBlobHeader) - .withLeaseIdHeader(leaseId) - .headers - ) + requestBuilder: PutBlockBlob, + payload: Source[ByteString, _]): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream + .putBlob(objectPath, + requestBuilder, + Some(HttpEntity(requestBuilder.contentType, requestBuilder.contentLength, payload))) /** * Put (Create) Page Blob. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param contentType content type of the blob - * @param maxBlockSize maximum block size - * @param blobSequenceNumber optional block sequence number - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build putAppendBlob request * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def putPageBlock(objectPath: String, - contentType: ContentType = ContentTypes.`application/octet-stream`, - maxBlockSize: Long, - blobSequenceNumber: Option[Int] = None, - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.putBlob( - objectPath, - None, - StorageHeaders() - .withContentTypeHeader(contentType) - .withBlobTypeHeader(BlobTypeHeader.PageBlobHeader) - .withPageBlobContentLengthHeader(maxBlockSize) - .withPageBlobSequenceNumberHeader(blobSequenceNumber) - .withLeaseIdHeader(leaseId) - .headers - ) + def putPageBlock(objectPath: String, requestBuilder: PutPageBlock): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.putBlob(objectPath, requestBuilder, None) /** * Put (Create) Append Blob. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` - * @param contentType content type of the blob - * @param leaseId lease ID of an active lease (if applicable) * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def putAppendBlock(objectPath: String, - contentType: ContentType = ContentTypes.`application/octet-stream`, - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.putBlob( - objectPath, - None, - StorageHeaders() - .withContentTypeHeader(contentType) - .withBlobTypeHeader(BlobTypeHeader.AppendBlobHeader) - .withLeaseIdHeader(leaseId) - .headers - ) + def putAppendBlock(objectPath: String, requestBuilder: PutAppendBlock): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.putBlob(objectPath, requestBuilder, None) /** * Create container. * - * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/container/blob` + * @param objectPath name of the container + * @param requestBuilder builder to build createContainer request * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def createContainer(objectPath: String): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.createContainer(objectPath) + def createContainer(objectPath: String, requestBuilder: CreateContainer): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.createContainer(objectPath, requestBuilder) } diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala index c114ed345a..4624b7f736 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala @@ -8,10 +8,16 @@ package storage package scaladsl import akka.NotUsed -import akka.http.scaladsl.model.{ContentType, ContentTypes, HttpEntity} -import akka.http.scaladsl.model.headers.ByteRange -import akka.stream.alpakka.azure.storage.headers.RangeWriteTypeHeader +import akka.http.scaladsl.model.HttpEntity import akka.stream.alpakka.azure.storage.impl.AzureStorageStream +import akka.stream.alpakka.azure.storage.requests.{ + ClearFileRange, + CreateFile, + DeleteFile, + GetFile, + GetProperties, + UpdateFileRange +} import akka.stream.scaladsl.Source import akka.util.ByteString @@ -26,106 +32,62 @@ object FileService { * Gets file representing `objectPath` with specified range (if applicable). * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` - * @param range range to download - * @param versionId versionId of the file (if applicable) - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build getFile request * @return A [[akka.stream.scaladsl.Source]] containing the objects data as a [[akka.util.ByteString]] along with a * materialized value containing the [[akka.stream.alpakka.azure.storage.ObjectMetadata]] */ - def getFile(objectPath: String, - range: Option[ByteRange] = None, - versionId: Option[String] = None, - leaseId: Option[String] = None): Source[ByteString, Future[ObjectMetadata]] = - AzureStorageStream.getObject(FileType, - objectPath, - versionId, - StorageHeaders().withRangeHeader(range).withLeaseIdHeader(leaseId).headers) + def getFile(objectPath: String, requestBuilder: GetFile): Source[ByteString, Future[ObjectMetadata]] = + AzureStorageStream.getObject(FileType, objectPath, requestBuilder) /** * Gets file properties. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` - * @param versionId versionId of the file (if applicable) - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build getFile properties request * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def getProperties(objectPath: String, - versionId: Option[String] = None, - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.getObjectProperties(FileType, - objectPath, - versionId, - StorageHeaders().withLeaseIdHeader(leaseId).headers) + def getProperties(objectPath: String, requestBuilder: GetProperties): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.getObjectProperties(FileType, objectPath, requestBuilder) /** * Deletes file. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` - * @param versionId versionId of the file (if applicable) - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build deleteFile request * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def deleteFile(objectPath: String, - versionId: Option[String] = None, - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.deleteObject(FileType, - objectPath, - versionId, - StorageHeaders().withLeaseIdHeader(leaseId).headers) + def deleteFile(objectPath: String, requestBuilder: DeleteFile): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.deleteObject(FileType, objectPath, requestBuilder) /** * Creates a file. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` - * @param contentType content type of the blob - * @param maxSize maximum size of the file - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build createFile request * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def createFile(objectPath: String, - contentType: ContentType = ContentTypes.`application/octet-stream`, - maxSize: Long, - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.createFile( - objectPath, - StorageHeaders() - .withContentTypeHeader(contentType) - .withFileMaxContentLengthHeader(maxSize) - .withFileTypeHeader() - .withLeaseIdHeader(leaseId) - .headers - ) + def createFile(objectPath: String, requestBuilder: CreateFile): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.createFile(objectPath, requestBuilder) /** * Updates file on the specified range. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` - * @param contentType content type of the blob - * @param range range of bytes to be written + * @param requestBuilder builder to build updateRange request * @param payload actual payload, a [[akka.stream.scaladsl.Source Source]] of [[akka.util.ByteString ByteString]] - * @param leaseId lease ID of an active lease (if applicable) * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ def updateRange(objectPath: String, - contentType: ContentType = ContentTypes.`application/octet-stream`, - range: ByteRange.Slice, - payload: Source[ByteString, _], - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = { - val contentLength = range.last - range.first + 1 + requestBuilder: UpdateFileRange, + payload: Source[ByteString, _]): Source[Option[ObjectMetadata], NotUsed] = { AzureStorageStream.updateRange( objectPath, - HttpEntity(contentType, contentLength, payload), - StorageHeaders() - .withContentLengthHeader(contentLength) - .withContentTypeHeader(contentType) - .withRangeHeader(range) - .withLeaseIdHeader(leaseId) - .withRangeWriteTypeHeader(RangeWriteTypeHeader.UpdateFileHeader) - .headers + HttpEntity(requestBuilder.contentType, requestBuilder.range.last - requestBuilder.range.first + 1, payload), + requestBuilder ) } @@ -133,21 +95,10 @@ object FileService { * Clears specified range from the file. * * @param objectPath path of the object, should start with "/" and separated by `/`, e.g. `/share/my-directory/blob` - * @param range range of bytes to be cleared - * @param leaseId lease ID of an active lease (if applicable) + * @param requestBuilder builder to build clearRange request * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ - def clearRange(objectPath: String, - range: ByteRange.Slice, - leaseId: Option[String] = None): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.clearRange( - objectPath, - StorageHeaders() - .withContentLengthHeader(0L) - .withRangeHeader(range) - .withLeaseIdHeader(leaseId) - .withRangeWriteTypeHeader(RangeWriteTypeHeader.ClearFileHeader) - .headers - ) + def clearRange(objectPath: String, requestBuilder: ClearFileRange): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.clearRange(objectPath, requestBuilder) } diff --git a/azure-storage/src/test/java/docs/javadsl/RequestBuilderTest.java b/azure-storage/src/test/java/docs/javadsl/RequestBuilderTest.java new file mode 100644 index 0000000000..72d723beca --- /dev/null +++ b/azure-storage/src/test/java/docs/javadsl/RequestBuilderTest.java @@ -0,0 +1,83 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package docs.javadsl; + +import akka.http.javadsl.model.ContentTypes; +import akka.http.javadsl.model.headers.ByteRange; +import akka.http.scaladsl.model.headers.RawHeader; +import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption; +import akka.stream.alpakka.azure.storage.requests.CreateFile; +import akka.stream.alpakka.azure.storage.requests.GetBlob; +import akka.stream.alpakka.azure.storage.requests.PutBlockBlob; +import akka.stream.alpakka.testkit.javadsl.LogCapturingJunit4; +import org.junit.Assert; +import org.junit.Rule; +import org.junit.Test; +import scala.Option; + +public class RequestBuilderTest { + + @Rule + public final LogCapturingJunit4 logCapturing = new LogCapturingJunit4(); + + @Test + public void createSimpleRequest() { + + //#simple-request-builder + final GetBlob requestBuilder = GetBlob.create(); + //#simple-request-builder + + Assert.assertEquals(Option.empty(), requestBuilder.versionId()); + Assert.assertEquals(Option.empty(), requestBuilder.range()); + Assert.assertEquals(Option.empty(), requestBuilder.leaseId()); + Assert.assertEquals(Option.empty(), requestBuilder.sse()); + } + + @Test + public void populateRequestBuilder() { + + //#populate-request-builder + final var requestBuilder = GetBlob.create().withLeaseId("my-lease-id").withRange(ByteRange.createSlice(0, 25)); + //#populate-request-builder + + Assert.assertEquals(Option.apply("my-lease-id"), requestBuilder.leaseId()); + Assert.assertEquals(Option.apply(ByteRange.createSlice(0, 25)), requestBuilder.range()); + Assert.assertEquals(Option.empty(), requestBuilder.sse()); + } + + @Test + public void createRequestBuilderWithMandatoryParams() { + + //#request-builder-with-initial-values + final var requestBuilder = CreateFile.create(256L, ContentTypes.TEXT_PLAIN_UTF8); + //#request-builder-with-initial-values + + Assert.assertEquals(Option.empty(), requestBuilder.leaseId()); + Assert.assertEquals(256L, requestBuilder.maxFileSize()); + Assert.assertEquals(ContentTypes.TEXT_PLAIN_UTF8, requestBuilder.contentType()); + } + + @Test + public void populateServerSideEncryption() { + + //#request-builder-with-sse + final var requestBuilder = PutBlockBlob.create(256L, ContentTypes.TEXT_PLAIN_UTF8) + .withServerSideEncryption(ServerSideEncryption.customerKey("SGVsbG9Xb3JsZA==")); + //#request-builder-with-sse + + Assert.assertEquals(ServerSideEncryption.customerKey("SGVsbG9Xb3JsZA=="), requestBuilder.sse().get()); + } + + @Test + public void populateAdditionalHeaders() { + + //#request-builder-with-additional-headers + final var requestBuilder = GetBlob.create().addHeader("If-Match", "foobar"); + //#request-builder-with-additional-headers + + Assert.assertEquals(1, requestBuilder.additionalHeaders().size()); + Assert.assertEquals(new RawHeader("If-Match", "foobar"), requestBuilder.additionalHeaders().head()); + } +} diff --git a/azure-storage/src/test/java/docs/javadsl/StorageTest.java b/azure-storage/src/test/java/docs/javadsl/StorageTest.java index 5b4adf7e48..e8c844fa6b 100644 --- a/azure-storage/src/test/java/docs/javadsl/StorageTest.java +++ b/azure-storage/src/test/java/docs/javadsl/StorageTest.java @@ -11,6 +11,17 @@ import akka.stream.alpakka.azure.storage.ObjectMetadata; import akka.stream.alpakka.azure.storage.javadsl.BlobService; import akka.stream.alpakka.azure.storage.javadsl.FileService; +import akka.stream.alpakka.azure.storage.requests.ClearFileRange; +import akka.stream.alpakka.azure.storage.requests.CreateContainer; +import akka.stream.alpakka.azure.storage.requests.CreateFile; +import akka.stream.alpakka.azure.storage.requests.DeleteFile; +import akka.stream.alpakka.azure.storage.requests.GetBlob; +import akka.stream.alpakka.azure.storage.requests.GetFile; +import akka.stream.alpakka.azure.storage.requests.GetProperties; +import akka.stream.alpakka.azure.storage.requests.PutAppendBlock; +import akka.stream.alpakka.azure.storage.requests.PutBlockBlob; +import akka.stream.alpakka.azure.storage.requests.PutPageBlock; +import akka.stream.alpakka.azure.storage.requests.UpdateFileRange; import akka.stream.alpakka.azure.storage.scaladsl.StorageWireMockBase; import akka.stream.alpakka.testkit.javadsl.LogCapturingJunit4; import akka.stream.javadsl.Source; @@ -58,7 +69,7 @@ public void createContainer() throws Exception { mockCreateContainer(); //#create-container - final Source, NotUsed> source = BlobService.createContainer(containerName()); + final Source, NotUsed> source = BlobService.createContainer(containerName(), CreateContainer.create()); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); //#create-container @@ -82,10 +93,8 @@ public void putBlockBlob() throws Exception { //#put-block-blob final Source, NotUsed> source = BlobService.putBlockBlob(containerName() + "/" + blobName(), - ContentTypes.TEXT_PLAIN_UTF8, - contentLength(), - Source.single(ByteString.fromString(payload())), - Optional.empty()); + PutBlockBlob.create(contentLength(), ContentTypes.TEXT_PLAIN_UTF8), + Source.single(ByteString.fromString(payload()))); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); //#put-block-blob @@ -94,6 +103,7 @@ public void putBlockBlob() throws Exception { Assert.assertTrue(optionalObjectMetadata.isPresent()); } + @Ignore("Test is failing due to multiple content length headers in the request.") @Test public void putPageBlob() throws Exception { mockPutPageBlob(); @@ -101,10 +111,7 @@ public void putPageBlob() throws Exception { //#put-page-blob final Source, NotUsed> source = BlobService.putPageBlock(containerName() + "/" + blobName(), - ContentTypes.TEXT_PLAIN_UTF8, - 512L, - Optional.of(0), - Optional.empty()); + PutPageBlock.create(512L, ContentTypes.TEXT_PLAIN_UTF8).withBlobSequenceNumber(0)); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); //#put-page-blob @@ -113,6 +120,7 @@ public void putPageBlob() throws Exception { Assert.assertTrue(optionalObjectMetadata.isPresent()); } + @Ignore("Test is failing due to multiple content length headers in the request.") @Test public void putAppendBlob() throws Exception { mockPutAppendBlob(); @@ -120,8 +128,7 @@ public void putAppendBlob() throws Exception { //#put-append-blob final Source, NotUsed> source = BlobService.putAppendBlock(containerName() + "/" + blobName(), - ContentTypes.TEXT_PLAIN_UTF8, - Optional.empty()); + PutAppendBlock.create(ContentTypes.TEXT_PLAIN_UTF8)); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); //#put-append-blob @@ -130,14 +137,13 @@ public void putAppendBlob() throws Exception { Assert.assertTrue(optionalObjectMetadata.isPresent()); } - @Test public void getBlob() throws Exception { mockGetBlob(); //#get-blob final Source> source = - BlobService.getBlob(containerName() + "/" + blobName(), Optional.empty(), Optional.empty()); + BlobService.getBlob(containerName() + "/" + blobName(), GetBlob.create()); final CompletionStage> eventualPayload = source.runWith(Sink.seq(), system); //#get-blob @@ -153,8 +159,7 @@ public void getBlobRange() throws Exception { //#get-blob-range final Source> source = - BlobService.getBlob(containerName() + "/" + blobName(), subRange(), - Optional.empty(), Optional.empty()); + BlobService.getBlob(containerName() + "/" + blobName(), GetBlob.create().withRange(subRange())); final CompletionStage> eventualPayload = source.runWith(Sink.seq(), system); //#get-blob-range @@ -170,7 +175,7 @@ public void getBlobProperties() throws Exception { //#get-blob-properties final Source, NotUsed> source = - BlobService.getProperties(containerName() + "/" + blobName(), Optional.empty(), Optional.empty()); + BlobService.getProperties(containerName() + "/" + blobName(), GetProperties.create()); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); //#get-blob-properties @@ -189,7 +194,7 @@ public void deleteBlob() throws Exception { //#delete-blob final Source, NotUsed> source = - BlobService.deleteBlob(containerName() + "/" + blobName(), Optional.empty(), Optional.empty()); + BlobService.deleteBlob(containerName() + "/" + blobName(), DeleteFile.create()); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); //#delete-blob @@ -207,7 +212,8 @@ public void createFile() throws Exception { //#create-file final Source, NotUsed> source = - FileService.createFile(containerName() + "/" + blobName(), ContentTypes.TEXT_PLAIN_UTF8, contentLength(), Optional.empty()); + FileService.createFile(containerName() + "/" + blobName(), + CreateFile.create(contentLength(), ContentTypes.TEXT_PLAIN_UTF8)); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); //#create-file @@ -230,8 +236,8 @@ public void updateRange() throws Exception { //#update-range final Source, NotUsed> source = FileService.updateRange(containerName() + "/" + blobName(), - ContentTypes.TEXT_PLAIN_UTF8, contentRange(), Source.single(ByteString.fromString(payload())), - Optional.empty()); + UpdateFileRange.create(contentRange(), ContentTypes.TEXT_PLAIN_UTF8), + Source.single(ByteString.fromString(payload()))); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); //#update-range @@ -249,7 +255,7 @@ public void getFile() throws Exception { //#get-file final Source> source = - FileService.getFile(containerName() + "/" + blobName(), Optional.empty(), Optional.empty()); + FileService.getFile(containerName() + "/" + blobName(), GetFile.create()); final CompletionStage> eventualPayload = source.runWith(Sink.seq(), system); //#get-file @@ -265,7 +271,7 @@ public void getFileProperties() throws Exception { //#get-file-properties final Source, NotUsed> source = - FileService.getProperties(containerName() + "/" + blobName(), Optional.empty(), Optional.empty()); + FileService.getProperties(containerName() + "/" + blobName(), GetProperties.create()); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); //#get-file-properties @@ -288,7 +294,7 @@ public void clearRange() throws Exception { //#clear-range final Source, NotUsed> source = - FileService.clearRange(containerName() + "/" + blobName(), subRange(), Optional.empty()); + FileService.clearRange(containerName() + "/" + blobName(), ClearFileRange.create(subRange())); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); //#clear-range @@ -306,7 +312,7 @@ public void deleteFile() throws Exception { //#delete-file final Source, NotUsed> source = - FileService.deleteFile(containerName() + "/" + blobName(), Optional.empty(), Optional.empty()); + FileService.deleteFile(containerName() + "/" + blobName(), DeleteFile.create()); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); //#delete-file diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala index 1c2dfaa8a8..08a26bfdd1 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala @@ -10,6 +10,14 @@ package scaladsl import akka.actor.ActorSystem import akka.http.scaladsl.model.ContentTypes import akka.http.scaladsl.model.headers.ByteRange +import akka.stream.alpakka.azure.storage.requests.{ + ClearFileRange, + CreateFile, + DeleteFile, + GetFile, + GetProperties, + UpdateFileRange +} import akka.stream.scaladsl.{Keep, Sink, Source} import akka.util.ByteString import org.scalatest.Ignore @@ -23,7 +31,8 @@ class AzureIntegrationTest extends StorageIntegrationSpec { "FileService" should { "create file" in { val maybeObjectMetadata = FileService - .createFile(objectPath = objectPath, contentType = ContentTypes.`text/plain(UTF-8)`, maxSize = contentLength) + .createFile(objectPath = objectPath, + requestBuilder = CreateFile(contentLength, ContentTypes.`text/plain(UTF-8)`)) .withAttributes(getDefaultAttributes) .runWith(Sink.head) .futureValue @@ -35,10 +44,11 @@ class AzureIntegrationTest extends StorageIntegrationSpec { "put range" in { val maybeObjectMetadata = FileService - .updateRange(objectPath = objectPath, - contentType = ContentTypes.`text/plain(UTF-8)`, - range = ByteRange(0, contentLength - 1), - payload = Source.single(ByteString(sampleText))) + .updateRange( + objectPath = objectPath, + requestBuilder = UpdateFileRange(ByteRange(0, contentLength - 1), ContentTypes.`text/plain(UTF-8)`), + payload = Source.single(ByteString(sampleText)) + ) .withAttributes(getDefaultAttributes) .runWith(Sink.head) .futureValue @@ -50,7 +60,7 @@ class AzureIntegrationTest extends StorageIntegrationSpec { "get file" in { val (maybeEventualObjectMetadata, eventualText) = FileService - .getFile(objectPath = objectPath) + .getFile(objectPath = objectPath, GetFile()) .via(framing) .map(byteString => byteString.utf8String + System.lineSeparator()) .toMat(Sink.seq)(Keep.both) @@ -65,7 +75,7 @@ class AzureIntegrationTest extends StorageIntegrationSpec { "get file properties" in { val maybeObjectMetadata = FileService - .getProperties(objectPath) + .getProperties(objectPath, GetProperties()) .withAttributes(getDefaultAttributes) .runWith(Sink.head) .futureValue @@ -80,7 +90,7 @@ class AzureIntegrationTest extends StorageIntegrationSpec { val range = ByteRange(0, 8) val (maybeEventualObjectMetadata, eventualText) = FileService - .getFile(objectPath, Some(range)) + .getFile(objectPath, GetFile().withRange(range)) .withAttributes(getDefaultAttributes) .via(framing) .map(_.utf8String) @@ -95,7 +105,7 @@ class AzureIntegrationTest extends StorageIntegrationSpec { "clear range" in { val range = ByteRange(16, 24) val maybeObjectMetadata = FileService - .clearRange(objectPath = objectPath, range = range) + .clearRange(objectPath = objectPath, requestBuilder = ClearFileRange(range)) .withAttributes(getDefaultAttributes) .runWith(Sink.head) .futureValue @@ -106,7 +116,7 @@ class AzureIntegrationTest extends StorageIntegrationSpec { "delete file" in { val maybeObjectMetadata = FileService - .deleteFile(objectPath) + .deleteFile(objectPath, DeleteFile()) .withAttributes(getDefaultAttributes) .toMat(Sink.head)(Keep.right) .run() @@ -118,7 +128,7 @@ class AzureIntegrationTest extends StorageIntegrationSpec { "get file after delete" in { val maybeObjectMetadata = FileService - .getProperties(objectPath) + .getProperties(objectPath, GetProperties()) .withAttributes(getDefaultAttributes) .toMat(Sink.head)(Keep.right) .run() diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala index 3ea7ab5d58..1c23463937 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala @@ -13,6 +13,7 @@ import akka.http.scaladsl.Http import akka.http.scaladsl.model.ContentTypes import akka.http.scaladsl.model.headers.ByteRange import akka.stream.Attributes +import akka.stream.alpakka.azure.storage.requests.{CreateContainer, DeleteBlob, GetBlob, GetProperties, PutBlockBlob} import akka.stream.alpakka.testkit.scaladsl.LogCapturing import akka.stream.scaladsl.{Flow, Framing, Keep, Sink, Source} import akka.testkit.TestKit @@ -60,8 +61,7 @@ trait StorageIntegrationSpec BlobService .putBlockBlob( objectPath = objectPath, - contentType = ContentTypes.`text/plain(UTF-8)`, - contentLength = contentLength, + requestBuilder = PutBlockBlob(contentLength, ContentTypes.`text/plain(UTF-8)`), payload = Source.single(ByteString(sampleText)) ) .withAttributes(getDefaultAttributes) @@ -76,7 +76,7 @@ trait StorageIntegrationSpec "get blob" in { val (maybeEventualObjectMetadata, eventualText) = BlobService - .getBlob(objectPath) + .getBlob(objectPath, GetBlob()) .withAttributes(getDefaultAttributes) .via(framing) .map(byteString => byteString.utf8String + System.lineSeparator()) @@ -92,7 +92,7 @@ trait StorageIntegrationSpec "get blob properties" in { val maybeObjectMetadata = BlobService - .getProperties(objectPath) + .getProperties(objectPath, GetProperties()) .withAttributes(getDefaultAttributes) .runWith(Sink.head) .futureValue @@ -107,7 +107,7 @@ trait StorageIntegrationSpec val range = ByteRange.Slice(0, 8) val (maybeEventualObjectMetadata, eventualText) = BlobService - .getBlob(objectPath, Some(range)) + .getBlob(objectPath, GetBlob().withRange(range)) .withAttributes(getDefaultAttributes) .via(framing) .map(_.utf8String) @@ -122,7 +122,7 @@ trait StorageIntegrationSpec "delete blob" in { val maybeObjectMetadata = BlobService - .deleteBlob(objectPath) + .deleteBlob(objectPath, DeleteBlob()) .withAttributes(getDefaultAttributes) .toMat(Sink.head)(Keep.right) .run() @@ -134,7 +134,7 @@ trait StorageIntegrationSpec "get blob after delete" in { val maybeObjectMetadata = BlobService - .getProperties(objectPath) + .getProperties(objectPath, GetProperties()) .withAttributes(getDefaultAttributes) .toMat(Sink.head)(Keep.right) .run() @@ -146,7 +146,7 @@ trait StorageIntegrationSpec protected def createContainer(containerName: String): Future[Done] = { BlobService - .createContainer(containerName) + .createContainer(containerName, CreateContainer()) .withAttributes(getDefaultAttributes) .runWith(Sink.ignore) } diff --git a/azure-storage/src/test/scala/docs/scaladsl/RequestBuilderSpec.scala b/azure-storage/src/test/scala/docs/scaladsl/RequestBuilderSpec.scala new file mode 100644 index 0000000000..5083b0ce80 --- /dev/null +++ b/azure-storage/src/test/scala/docs/scaladsl/RequestBuilderSpec.scala @@ -0,0 +1,70 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package docs.scaladsl + +import akka.http.scaladsl.model.ContentTypes +import akka.http.scaladsl.model.headers.{ByteRange, RawHeader} +import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption +import akka.stream.alpakka.azure.storage.requests.{CreateFile, GetBlob, PutBlockBlob} +import akka.stream.alpakka.testkit.scaladsl.LogCapturing +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +class RequestBuilderSpec extends AnyFlatSpec with Matchers with LogCapturing { + + it should "create request builder with default values" in { + + //#simple-request-builder + val requestBuilder = GetBlob() + //#simple-request-builder + + requestBuilder.versionId shouldBe empty + requestBuilder.range shouldBe empty + requestBuilder.leaseId shouldBe empty + requestBuilder.sse shouldBe empty + requestBuilder.additionalHeaders shouldBe empty + } + + it should "create request builder with values" in { + + //#populate-request-builder + val requestBuilder = GetBlob().withLeaseId("my-lease-id").withRange(ByteRange(0, 25)) + //#populate-request-builder + + requestBuilder.leaseId shouldBe Some("my-lease-id") + requestBuilder.range shouldBe Some(ByteRange(0, 25)) + requestBuilder.sse shouldBe empty + } + + it should "create request builder with initial values" in { + + //#request-builder-with-initial-values + val requestBuilder = CreateFile(256L, ContentTypes.`text/plain(UTF-8)`) + //#request-builder-with-initial-values + + requestBuilder.leaseId shouldBe empty + requestBuilder.maxFileSize shouldBe 256L + requestBuilder.contentType shouldBe ContentTypes.`text/plain(UTF-8)` + } + + it should "populate request builder with ServerSideEncryption" in { + + //#request-builder-with-sse + val requestBuilder = PutBlockBlob(256L, ContentTypes.`text/plain(UTF-8)`) + .withServerSideEncryption(ServerSideEncryption.customerKey("SGVsbG9Xb3JsZA==")) + //#request-builder-with-sse + + requestBuilder.sse shouldBe Some(ServerSideEncryption.customerKey("SGVsbG9Xb3JsZA==")) + } + + it should "populate request builder with additional headers" in { + + //#request-builder-with-additional-headers + val requestBuilder = GetBlob().addHeader("If-Match", "foobar") + //#request-builder-with-additional-headers + + requestBuilder.additionalHeaders shouldBe Seq(RawHeader("If-Match", "foobar")) + } +} diff --git a/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala b/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala index ffb5dfa3ba..4201acd6e7 100644 --- a/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala +++ b/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala @@ -6,6 +6,7 @@ package docs.scaladsl import akka.NotUsed import akka.http.scaladsl.model.ContentTypes +import akka.stream.alpakka.azure.storage.requests._ import akka.stream.alpakka.azure.storage.scaladsl.StorageWireMockBase import akka.stream.alpakka.azure.storage.scaladsl.StorageWireMockBase.ETagRawValue import akka.stream.alpakka.testkit.scaladsl.LogCapturing @@ -42,7 +43,8 @@ class StorageSpec import akka.stream.alpakka.azure.storage.scaladsl.BlobService import akka.stream.alpakka.azure.storage.ObjectMetadata - val source: Source[Option[ObjectMetadata], NotUsed] = BlobService.createContainer(containerName) + val source: Source[Option[ObjectMetadata], NotUsed] = + BlobService.createContainer(containerName, CreateContainer()) val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) //#create-container @@ -67,9 +69,8 @@ class StorageSpec val source: Source[Option[ObjectMetadata], NotUsed] = BlobService.putBlockBlob( objectPath = s"$containerName/$blobName", - contentType = ContentTypes.`text/plain(UTF-8)`, - contentLength = contentLength, - payload = Source.single(ByteString(payload)) + payload = Source.single(ByteString(payload)), + requestBuilder = PutBlockBlob(contentLength, ContentTypes.`text/plain(UTF-8)`) ) val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) @@ -85,7 +86,7 @@ class StorageSpec // TODO: There are couple of issues, firstly there are two `Content-Length` headers being added, one by `putBlob` // function and secondly by, most likely, by WireMock. Need to to figure out how to tell WireMock not to add `Content-Length` // header, secondly once that resolve then we get `akka.http.scaladsl.model.EntityStreamException`. - "put page blob" in { + "put page blob" ignore { mockPutPageBlob() //#put-page-blob @@ -95,9 +96,7 @@ class StorageSpec val source: Source[Option[ObjectMetadata], NotUsed] = BlobService.putPageBlock( objectPath = s"$containerName/$blobName", - contentType = ContentTypes.`text/plain(UTF-8)`, - maxBlockSize = 512L, - blobSequenceNumber = Some(0) + requestBuilder = PutPageBlock(512L, ContentTypes.`text/plain(UTF-8)`).withBlobSequenceNumber(0) ) val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) @@ -113,7 +112,7 @@ class StorageSpec // TODO: There are couple of issues, firstly there are two `Content-Length` headers being added, one by `putBlob` // function and secondly by, most likely, by WireMock. Need to to figure out how to tell WireMock not to add `Content-Length` // header, secondly once that resolve then we get `akka.http.scaladsl.model.EntityStreamException`. - "put append blob" in { + "put append blob" ignore { mockPutAppendBlob() //#put-append-blob @@ -121,10 +120,8 @@ class StorageSpec import akka.stream.alpakka.azure.storage.ObjectMetadata val source: Source[Option[ObjectMetadata], NotUsed] = - BlobService.putAppendBlock( - objectPath = s"$containerName/$blobName", - contentType = ContentTypes.`text/plain(UTF-8)` - ) + BlobService.putAppendBlock(objectPath = s"$containerName/$blobName", + requestBuilder = PutAppendBlock(ContentTypes.`text/plain(UTF-8)`)) val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) //#put-append-blob @@ -144,7 +141,7 @@ class StorageSpec import akka.stream.alpakka.azure.storage.ObjectMetadata val source: Source[ByteString, Future[ObjectMetadata]] = - BlobService.getBlob(objectPath = s"$containerName/$blobName") + BlobService.getBlob(objectPath = s"$containerName/$blobName", GetBlob()) val eventualText = source.toMat(Sink.seq)(Keep.right).run() //#get-blob @@ -160,7 +157,7 @@ class StorageSpec import akka.stream.alpakka.azure.storage.ObjectMetadata val source: Source[ByteString, Future[ObjectMetadata]] = - BlobService.getBlob(objectPath = s"$containerName/$blobName", range = Some(subRange)) + BlobService.getBlob(objectPath = s"$containerName/$blobName", requestBuilder = GetBlob().withRange(subRange)) val eventualText: Future[Seq[ByteString]] = source.toMat(Sink.seq)(Keep.right).run() //#get-blob-range @@ -176,7 +173,7 @@ class StorageSpec import akka.stream.alpakka.azure.storage.ObjectMetadata val source: Source[Option[ObjectMetadata], NotUsed] = - BlobService.getProperties(objectPath = s"$containerName/$blobName") + BlobService.getProperties(objectPath = s"$containerName/$blobName", GetProperties()) val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) //#get-blob-properties @@ -197,7 +194,7 @@ class StorageSpec import akka.stream.alpakka.azure.storage.ObjectMetadata val source: Source[Option[ObjectMetadata], NotUsed] = - BlobService.deleteBlob(objectPath = s"$containerName/$blobName") + BlobService.deleteBlob(objectPath = s"$containerName/$blobName", DeleteBlob()) val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) //#delete-blob @@ -221,8 +218,7 @@ class StorageSpec val source: Source[Option[ObjectMetadata], NotUsed] = FileService.createFile(objectPath = s"$containerName/$blobName", - contentType = ContentTypes.`text/plain(UTF-8)`, - maxSize = contentLength) + requestBuilder = CreateFile(contentLength, ContentTypes.`text/plain(UTF-8)`)) val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) //#create-file @@ -247,9 +243,8 @@ class StorageSpec val source: Source[Option[ObjectMetadata], NotUsed] = FileService.updateRange( objectPath = s"$containerName/$blobName", - contentType = ContentTypes.`text/plain(UTF-8)`, - range = contentRange, - payload = Source.single(ByteString(payload)) + payload = Source.single(ByteString(payload)), + requestBuilder = UpdateFileRange(contentRange, ContentTypes.`text/plain(UTF-8)`) ) val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() @@ -270,7 +265,7 @@ class StorageSpec import akka.stream.alpakka.azure.storage.ObjectMetadata val source: Source[ByteString, Future[ObjectMetadata]] = - FileService.getFile(objectPath = s"$containerName/$blobName") + FileService.getFile(objectPath = s"$containerName/$blobName", GetFile()) val eventualText: Future[Seq[ByteString]] = source.toMat(Sink.seq)(Keep.right).run() //#get-file @@ -286,7 +281,7 @@ class StorageSpec import akka.stream.alpakka.azure.storage.ObjectMetadata val source: Source[Option[ObjectMetadata], NotUsed] = - FileService.getProperties(objectPath = s"$containerName/$blobName") + FileService.getProperties(objectPath = s"$containerName/$blobName", GetProperties()) val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.toMat(Sink.head)(Keep.right).run() //#get-file-properties @@ -310,7 +305,7 @@ class StorageSpec import akka.stream.alpakka.azure.storage.ObjectMetadata val source: Source[Option[ObjectMetadata], NotUsed] = - FileService.clearRange(objectPath = s"$containerName/$blobName", range = subRange) + FileService.clearRange(objectPath = s"$containerName/$blobName", requestBuilder = ClearFileRange(subRange)) val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) //#clear-range @@ -330,7 +325,7 @@ class StorageSpec import akka.stream.alpakka.azure.storage.ObjectMetadata val source: Source[Option[ObjectMetadata], NotUsed] = - FileService.deleteFile(objectPath = s"$containerName/$blobName") + FileService.deleteFile(objectPath = s"$containerName/$blobName", DeleteFile()) val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) //#delete-file diff --git a/docs/src/main/paradox/azure-storage.md b/docs/src/main/paradox/azure-storage.md index e77b4dc5df..ff976d7918 100644 --- a/docs/src/main/paradox/azure-storage.md +++ b/docs/src/main/paradox/azure-storage.md @@ -56,6 +56,63 @@ At minimum following configurations needs to be set: * `account-key`, Account key to use to create authorization signature, mandatory for `SharedKey` or `SharedKeyLite` authorization types, as described [here](https://learn.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key). Environment variable `AZURE_STORAGE_ACCOUNT_KEY` can be set to override this configuration. * `sas-token` if authorization type is `sas`. Environment variable `AZURE_STORAGE_SAS_TOKEN` can be set to override this configuration. +## Building request + +Each function takes two parameters `objectPath` and `requestBuilder`. The `objectPath` is a `/` separated string of the path of the blob +or file, for example, `my-container/my-blob` or `my-share/my-directory/my-file`. + +Each request builder is subclass of [`RequestBuilder`](/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/RequestBuilder.scala) which knows how to construct request for the given operation. + +### Create simple request builder with default values + +In this example `GetBlob` builder is initialized without any optional field. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/RequestBuilderSpec.scala) { #simple-request-builder } + +Java +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/RequestBuilderTest.java) { #simple-request-builder } + +### Create request builder initialized with optional fields + +In this example `GetBlob` builder is initialized with given `leaseId` and `range` fields. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/RequestBuilderSpec.scala) { #populate-request-builder } + +Java +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/RequestBuilderTest.java) { #populate-request-builder } + +### Create request builder initialized with required fields + +In this example `CreateFile` builder is initialized with `maxFileSize` and `contentType` fields, which are required fields for `CreateFile` operation. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/RequestBuilderSpec.scala) { #request-builder-with-initial-values } + +Java +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/RequestBuilderTest.java) { #request-builder-with-initial-values } + +### Create request builder with ServerSideEncryption + +`ServerSideEncryption` can be initialized in similar fashion. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/RequestBuilderSpec.scala) { #request-builder-with-sse } + +Java +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/RequestBuilderTest.java) { #request-builder-with-sse } + +### Create request builder with additional headers + +Some operations allow you to add additional headers, for `GetBlob` you can specify `If-Match` header, which specify this header to perform the operation only if the resource's ETag matches the value specified, this can be done by calling `addHeader` function. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/RequestBuilderSpec.scala) { #request-builder-with-additional-headers } + +Java +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/RequestBuilderTest.java) { #request-builder-with-additional-headers } + ## Supported operations on Blob service ### Create Container @@ -156,7 +213,7 @@ Scala Java : @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #get-file } -### Get file properties without downloading blob +### Get file properties without downloading file The [`Get File Properties`](https://learn.microsoft.com/en-us/rest/api/storageservices/get-file-properties) operation returns all user-defined metadata, standard HTTP properties, and system properties for the file. (**Note:** Current implementation does not return user-defined metatdata.) From 9358d5ef5b159a69ce267d5e1ee2b4d246741362 Mon Sep 17 00:00:00 2001 From: sfali Date: Sat, 31 Aug 2024 16:33:57 -0400 Subject: [PATCH 61/78] test for loading settings from custom path #3253 1. don't throw RuntimeException, it fails to load config from custom path 2. clean up configs --- .../stream/alpakka/azure/storage/models.scala | 1 - .../test/resources/application-azurite.conf | 23 ------------ .../src/test/resources/application.conf | 22 +++++++++++ .../azure/storage/StorageSettingsSpec.scala | 37 +++++++++++++------ .../scaladsl/AzuriteIntegrationSpec.scala | 7 ++-- 5 files changed, 50 insertions(+), 40 deletions(-) delete mode 100644 azure-storage/src/test/resources/application-azurite.conf create mode 100644 azure-storage/src/test/resources/application.conf diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala index 7b1ea718c4..c19d44f3f9 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala @@ -27,7 +27,6 @@ object AzureNameKeyCredential { def apply(config: Config): AzureNameKeyCredential = { val accountName = config.getString("account-name", "") val accountKey = config.getString("account-key", "") - if (accountName.isEmpty) throw new RuntimeException("accountName property must be defined") AzureNameKeyCredential(accountName, accountKey) } } diff --git a/azure-storage/src/test/resources/application-azurite.conf b/azure-storage/src/test/resources/application-azurite.conf deleted file mode 100644 index 017bac8b7a..0000000000 --- a/azure-storage/src/test/resources/application-azurite.conf +++ /dev/null @@ -1,23 +0,0 @@ -alpakka { - azure-storage { - api-version = "2024-11-04" - signing-algorithm = "HmacSHA256" - - credentials { - authorization-type = SharedKey - account-name = "devstoreaccount1" - account-key = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" - sas-token = "" - } - } -} - -akka { - http { - client { - parsing { - max-response-reason-length = 512 - } - } - } -} diff --git a/azure-storage/src/test/resources/application.conf b/azure-storage/src/test/resources/application.conf new file mode 100644 index 0000000000..29bcdf4dcd --- /dev/null +++ b/azure-storage/src/test/resources/application.conf @@ -0,0 +1,22 @@ +azurite { + api-version = "2024-11-04" + signing-algorithm = "HmacSHA256" + endpoint-url = "http://127.0.0.1:10000" + + credentials { + authorization-type = SharedKey + account-name = "devstoreaccount1" + account-key = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" + sas-token = "" + } +} + +akka { + http { + client { + parsing { + max-response-reason-length = 512 + } + } + } +} diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala index 85af3656cd..9ef0ad7ea4 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala @@ -6,14 +6,23 @@ package akka.stream.alpakka package azure package storage +import akka.actor.ActorSystem +import akka.testkit.TestKit import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfterAll +import org.scalatest.concurrent.ScalaFutures import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.AnyWordSpec -import scala.concurrent.duration._ +import org.scalatest.wordspec.{AnyWordSpec, AnyWordSpecLike} +import scala.concurrent.duration._ import scala.util.{Failure, Success, Try} -class StorageSettingsSpec extends AnyWordSpec with Matchers { +class StorageSettingsSpec + extends TestKit(ActorSystem("StorageSettingsSystem")) + with AnyWordSpecLike + with Matchers + with BeforeAndAfterAll + with ScalaFutures { private def mkSettings(more: String) = StorageSettings( @@ -38,6 +47,12 @@ class StorageSettingsSpec extends AnyWordSpec with Matchers { .resolve() ) + override protected def afterAll(): Unit = { + super.afterAll() + + system.terminate().futureValue + } + "StorageSettings" should { "correctly parse config with anonymous authorization type" in { val settings = mkSettings("") @@ -92,15 +107,6 @@ class StorageSettingsSpec extends AnyWordSpec with Matchers { } } - "throw RuntimeException if account name is empty" in { - Try(mkSettings(""" credentials.account-name="" """)) match { - case Failure(ex) => - ex shouldBe a[RuntimeException] - ex.getMessage shouldBe "accountName property must be defined" - case Success(_) => fail("Should have thrown exception") - } - } - "parse retry settings" in { val settings = mkSettings("") settings.retrySettings shouldBe RetrySettings(maxRetries = 5, @@ -134,5 +140,12 @@ class StorageSettingsSpec extends AnyWordSpec with Matchers { settings.endPointUrl shouldBe defined settings.endPointUrl.get shouldBe "http://localhost:1234" } + + "load custom settings from a path" in { + val actual = StorageExt(system).settings("azurite") + actual.authorizationType shouldBe "SharedKey" + actual.endPointUrl shouldBe Some("http://127.0.0.1:10000") + actual.azureNameKeyCredential.accountName shouldBe "devstoreaccount1" + } } } diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala index a8ccad9673..76037aca2b 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala @@ -10,7 +10,6 @@ package scaladsl import akka.actor.ActorSystem import akka.stream.Attributes import com.dimafeng.testcontainers.ForAllTestContainer -import com.typesafe.config.ConfigFactory import org.scalatest.Ignore import scala.concurrent.duration._ @@ -23,10 +22,10 @@ class AzuriteIntegrationSpec extends StorageIntegrationSpec with ForAllTestConta override lazy val container: AzuriteContainer = new AzuriteContainer() - override protected implicit val system: ActorSystem = - ActorSystem("StorageIntegrationSpec", ConfigFactory.load("application-azurite").withFallback(ConfigFactory.load())) + override protected implicit val system: ActorSystem = ActorSystem("AzuriteIntegrationSpec") - protected lazy val blobSettings: StorageSettings = StorageSettings().withEndPointUrl(container.getBlobHostAddress) + protected lazy val blobSettings: StorageSettings = + StorageExt(system).settings("azurite").withEndPointUrl(container.getBlobHostAddress) override protected def beforeAll(): Unit = { super.beforeAll() From 2a0220ba300886cab5fe5bf5664ed29887f08c76 Mon Sep 17 00:00:00 2001 From: sfali Date: Sat, 31 Aug 2024 16:51:52 -0400 Subject: [PATCH 62/78] remove unused import #3253 --- .../akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala index 9ef0ad7ea4..c7207ae087 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/StorageSettingsSpec.scala @@ -12,7 +12,7 @@ import com.typesafe.config.ConfigFactory import org.scalatest.BeforeAndAfterAll import org.scalatest.concurrent.ScalaFutures import org.scalatest.matchers.should.Matchers -import org.scalatest.wordspec.{AnyWordSpec, AnyWordSpecLike} +import org.scalatest.wordspec.AnyWordSpecLike import scala.concurrent.duration._ import scala.util.{Failure, Success, Try} From 93293183eec46cac5160ed0615bd6590dc7cbb93 Mon Sep 17 00:00:00 2001 From: sfali Date: Mon, 2 Sep 2024 13:37:13 -0400 Subject: [PATCH 63/78] Signer spec #3253 1. add implicit clock in order to test date 2. add test dependency to storage common in order to test authorization header 3. change to lower case as per Azure storage specification 4. handling of empty query --- .../storage/impl/AzureStorageStream.scala | 2 + .../azure/storage/impl/auth/Signer.scala | 29 +-- .../alpakka/azure/storage/package.scala | 6 +- .../storage/requests/RequestBuilder.scala | 10 +- .../azure/storage/impl/auth/SignerSpec.scala | 202 ++++++++++++++++++ project/Dependencies.scala | 4 +- 6 files changed, 230 insertions(+), 23 deletions(-) create mode 100644 azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/impl/auth/SignerSpec.scala diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index 35bf2272fb..91c81b1d7a 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -37,6 +37,7 @@ import akka.stream.alpakka.azure.storage.requests.{ import akka.stream.scaladsl.{Flow, RetryFlow, Source} import akka.util.ByteString +import java.time.Clock import scala.collection.immutable import scala.concurrent.{Future, Promise} import scala.util.{Failure, Success, Try} @@ -203,6 +204,7 @@ object AzureStorageStream { ) import settings.retrySettings._ + implicit val clock: Clock = Clock.systemUTC() Source .single(request) .flatMapConcat(request => Signer(request, settings).signedRequest) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala index 93869fda3f..53264fdc1a 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala @@ -12,6 +12,7 @@ import akka.http.scaladsl.model.{HttpRequest, Uri} import akka.http.scaladsl.model.headers._ import akka.stream.scaladsl.Source +import java.time.Clock import java.util.Base64 import javax.crypto.Mac import javax.crypto.spec.SecretKeySpec @@ -23,7 +24,7 @@ import javax.crypto.spec.SecretKeySpec * @param settings * - Storage settings */ -final case class Signer(initialRequest: HttpRequest, settings: StorageSettings) { +final case class Signer(initialRequest: HttpRequest, settings: StorageSettings)(implicit clock: Clock) { private val credential = settings.azureNameKeyCredential private val requestWithHeaders = @@ -38,22 +39,15 @@ final case class Signer(initialRequest: HttpRequest, settings: StorageSettings) } def signedRequest: Source[HttpRequest, NotUsed] = { - import Signer._ val authorizationType = settings.authorizationType if (authorizationType == AnonymousAuthorizationType || authorizationType == SasAuthorizationType) Source.single(requestWithHeaders) - else { - val headersToSign = - if (authorizationType == SharedKeyLiteAuthorizationType) buildHeadersToSign(SharedKeyLiteHeaders) - else buildHeadersToSign(SharedKeyHeaders) - - val signature = sign(headersToSign) + else Source.single( requestWithHeaders.addHeader( - RawHeader(AuthorizationHeaderKey, s"$authorizationType ${credential.accountName}:$signature") + RawHeader(AuthorizationHeaderKey, generateAuthorizationHeader) ) ) - } } private def getHeaderOptionalValue(headerName: String) = @@ -81,7 +75,7 @@ final case class Signer(initialRequest: HttpRequest, settings: StorageSettings) private def getAdditionalXmsHeaders = requestWithHeaders.headers.filter(header => header.name().startsWith("x-ms-")).sortBy(_.name()).map { header => - s"${header.name()}:${header.value()}" + s"${header.name().toLowerCase}:${header.value()}" } private def getCanonicalizedResource = { @@ -92,7 +86,7 @@ final case class Signer(initialRequest: HttpRequest, settings: StorageSettings) uri.queryString() match { case Some(queryString) => Uri.Query(queryString).toMap.toSeq.sortBy(_._1).map { - case (key, value) => s"$key:$value" + case (key, value) => s"${key.toLowerCase}:$value" } case None => Seq.empty[String] @@ -101,8 +95,15 @@ final case class Signer(initialRequest: HttpRequest, settings: StorageSettings) Seq(resourcePath) ++ queries } - private def sign(headersToSign: Seq[String]) = - Base64.getEncoder.encodeToString(mac.doFinal(headersToSign.mkString(NewLine).getBytes)) + private[auth] def generateAuthorizationHeader: String = { + import Signer._ + val authorizationType = settings.authorizationType + val headersToSign = + if (authorizationType == SharedKeyLiteAuthorizationType) buildHeadersToSign(SharedKeyLiteHeaders) + else buildHeadersToSign(SharedKeyHeaders) + val signature = Base64.getEncoder.encodeToString(mac.doFinal(headersToSign.mkString(NewLine).getBytes)) + s"$authorizationType ${credential.accountName}:$signature" + } } object Signer { diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala index a1f7c2088a..a39e98e821 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala @@ -7,7 +7,7 @@ package azure import com.typesafe.config.Config -import java.time.{ZoneOffset, ZonedDateTime} +import java.time.{Clock, ZoneOffset} import java.time.format.DateTimeFormatter package object storage { @@ -34,8 +34,8 @@ package object storage { private[storage] val PageBlobType = "PageBlob" private[storage] val AppendBlobType = "AppendBlob" - private[storage] def getFormattedDate: String = - DateTimeFormatter.RFC_1123_DATE_TIME.format(ZonedDateTime.now(ZoneOffset.UTC)) + private[storage] def getFormattedDate(implicit clock: Clock): String = + DateTimeFormatter.RFC_1123_DATE_TIME.format(clock.instant().atOffset(ZoneOffset.UTC)) /** Removes ETag quotes in the same way the official AWS tooling does. See */ diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/RequestBuilder.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/RequestBuilder.scala index 8ba317647e..a492224342 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/RequestBuilder.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/RequestBuilder.scala @@ -30,10 +30,12 @@ abstract class RequestBuilder(val sse: Option[ServerSideEncryption] = None, private[storage] def createRequest(settings: StorageSettings, storageType: String, objectPath: String): HttpRequest = HttpRequest( method = method, - uri = createUri(settings = settings, - storageType = storageType, - objectPath = objectPath, - queryString = createQueryString(settings, Some(Query(queryParams).toString()))), + uri = createUri( + settings = settings, + storageType = storageType, + objectPath = objectPath, + queryString = createQueryString(settings, emptyStringToOption(Query(queryParams).toString())) + ), headers = getHeaders ) diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/impl/auth/SignerSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/impl/auth/SignerSpec.scala new file mode 100644 index 0000000000..9cf5770960 --- /dev/null +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/impl/auth/SignerSpec.scala @@ -0,0 +1,202 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package impl.auth + +import akka.actor.ActorSystem +import akka.http.scaladsl.model.{ContentTypes, HttpMethods} +import akka.http.scaladsl.model.headers.{ + `Content-Length`, + `Content-Type`, + `If-Match`, + `If-Modified-Since`, + ByteRange, + Range +} +import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption +import akka.stream.alpakka.azure.storage.requests.{GetBlob, GetFile, PutBlockBlob} +import akka.testkit.TestKit +import com.azure.storage.common.StorageSharedKeyCredential +import org.scalatest.BeforeAndAfterAll +import org.scalatest.concurrent.ScalaFutures +import org.scalatest.flatspec.AnyFlatSpecLike +import org.scalatest.matchers.should.Matchers + +import java.net.URL +import java.time.{Clock, Instant, ZoneOffset} +import scala.jdk.CollectionConverters._ + +class SignerSpec + extends TestKit(ActorSystem("SignerSystem")) + with AnyFlatSpecLike + with Matchers + with BeforeAndAfterAll + with ScalaFutures { + + private implicit val clock: Clock = Clock.fixed(Instant.parse("2024-09-02T12:00:00Z"), ZoneOffset.UTC) + private val objectPath = "my-container/MY-BLOB.csv" + private val filePath = "my-directory/my-sub-directory/my-file.csv" + private lazy val storageSettings = StorageExt(system).settings("azurite").withEndPointUrl("") + private lazy val storageSharedKeyCredential = new StorageSharedKeyCredential( + storageSettings.azureNameKeyCredential.accountName, + system.settings.config.getString("azurite.credentials.account-key") + ) + + private val leaseId = "ABC123" + private val range = ByteRange(0, 50) + private val versionId = "12345XYZ" + private val customerKey = "EqqWHbls3Y1Hp5B+IS5oUA==" + + override protected def afterAll(): Unit = { + super.afterAll() + system.terminate().futureValue + } + + it should "sign request with no parameters" in { + val requestBuilder = GetBlob() + val request = + requestBuilder.createRequest(settings = storageSettings, storageType = BlobType, objectPath = objectPath) + + val signer = Signer(request, storageSettings) + signer.generateAuthorizationHeader shouldBe generateAuthorizationHeader(BlobType, + objectPath, + HttpMethods.GET.name(), + Map(`Content-Length`.name -> "0")) + } + + it should "sign request with one optional parameter" in { + val requestBuilder = GetBlob().withLeaseId(leaseId) + val request = + requestBuilder.createRequest(settings = storageSettings, storageType = BlobType, objectPath = objectPath) + val expectedValue = generateAuthorizationHeader(BlobType, + objectPath, + HttpMethods.GET.name(), + Map(`Content-Length`.name -> "0", LeaseIdHeaderKey -> leaseId)) + Signer(request, storageSettings).generateAuthorizationHeader shouldBe expectedValue + } + + it should "sign request with multiple parameters" in { + val requestBuilder = GetBlob().withLeaseId(leaseId).withRange(range) + val request = + requestBuilder.createRequest(settings = storageSettings, storageType = BlobType, objectPath = objectPath) + val expectedValue = + generateAuthorizationHeader( + BlobType, + objectPath, + HttpMethods.GET.name(), + Map(`Content-Length`.name -> "0", + LeaseIdHeaderKey -> leaseId, + Range.name -> s"bytes=${range.first}-${range.last}") + ) + Signer(request, storageSettings).generateAuthorizationHeader shouldBe expectedValue + } + + it should "sign request with multiple parameters and query parameter" in { + val requestBuilder = GetBlob().withLeaseId(leaseId).withRange(range).withVersionId(versionId) + val request = + requestBuilder.createRequest(settings = storageSettings, storageType = BlobType, objectPath = objectPath) + val expectedValue = + generateAuthorizationHeader( + BlobType, + objectPath, + HttpMethods.GET.name(), + Map(`Content-Length`.name -> "0", + LeaseIdHeaderKey -> leaseId, + Range.name -> s"bytes=${range.first}-${range.last}"), + Some(s"versionId=$versionId") + ) + Signer(request, storageSettings).generateAuthorizationHeader shouldBe expectedValue + } + + it should "sign request with only query parameter" in { + val requestBuilder = GetFile().withVersionId(versionId) + val request = + requestBuilder.createRequest(settings = storageSettings, storageType = FileType, objectPath = filePath) + val expectedValue = + generateAuthorizationHeader( + BlobType, + filePath, + HttpMethods.GET.name(), + Map(`Content-Length`.name -> "0"), + Some(s"versionId=$versionId") + ) + Signer(request, storageSettings).generateAuthorizationHeader shouldBe expectedValue + } + + it should "sign request with conditional headers" in { + val requestBuilder = + GetFile() + .addHeader("", "") + .addHeader(`If-Match`.name, "abXzWj65") + .addHeader(`If-Modified-Since`.name, getFormattedDate) + val request = + requestBuilder.createRequest(settings = storageSettings, storageType = FileType, objectPath = filePath) + val expectedValue = + generateAuthorizationHeader( + BlobType, + filePath, + HttpMethods.GET.name(), + Map(`Content-Length`.name -> "0", `If-Match`.name -> "abXzWj65", `If-Modified-Since`.name -> getFormattedDate) + ) + Signer(request, storageSettings).generateAuthorizationHeader shouldBe expectedValue + } + + it should "sign request with required parameters" in { + val requestBuilder = PutBlockBlob(1024, ContentTypes.`text/csv(UTF-8)`) + val request = + requestBuilder.createRequest(settings = storageSettings, storageType = BlobType, objectPath = objectPath) + val expectedValue = + generateAuthorizationHeader( + BlobType, + objectPath, + HttpMethods.PUT.name(), + Map(`Content-Length`.name -> "1024", + `Content-Type`.name -> ContentTypes.`text/csv(UTF-8)`.value, + "x-ms-blob-type" -> BlockBlobType) + ) + Signer(request, storageSettings).generateAuthorizationHeader shouldBe expectedValue + } + + it should "sign request with ServerSideEncryption enabled" in { + val requestBuilder = + PutBlockBlob(1024, ContentTypes.`text/csv(UTF-8)`) + .withServerSideEncryption(ServerSideEncryption.customerKey(customerKey)) + val request = + requestBuilder.createRequest(settings = storageSettings, storageType = BlobType, objectPath = objectPath) + val expectedValue = { + generateAuthorizationHeader( + BlobType, + objectPath, + HttpMethods.PUT.name(), + Map( + `Content-Length`.name -> "1024", + `Content-Type`.name -> ContentTypes.`text/csv(UTF-8)`.value, + "x-ms-blob-type" -> BlockBlobType, + "x-ms-encryption-algorithm" -> "AES256", + "x-ms-encryption-key" -> customerKey, + "x-ms-encryption-key-sha256" -> "Zq+2UiSyaBzexl9Y1S/TzJssWRSwsfTnPsMKA+Kew2g=" + ) + ) + } + Signer(request, storageSettings).generateAuthorizationHeader shouldBe expectedValue + } + + // generates authorization header using Azure API + private def generateAuthorizationHeader(storageType: String, + objectPath: String, + httpMethod: String, + headers: Map[String, String], + maybeQueryString: Option[String] = None) = { + val queryString = maybeQueryString.map(value => s"?$value").getOrElse("") + val allHeaders = headers ++ Map("x-ms-date" -> getFormattedDate, "x-ms-version" -> storageSettings.apiVersion) + val url = new URL( + s"https://${storageSharedKeyCredential.getAccountName}.$storageType.core.windows.net/$objectPath$queryString" + ) + storageSharedKeyCredential.generateAuthorizationHeader(url, httpMethod, allHeaders.asJava) + } + +} diff --git a/project/Dependencies.scala b/project/Dependencies.scala index 02697b9097..5856f66366 100644 --- a/project/Dependencies.scala +++ b/project/Dependencies.scala @@ -113,8 +113,8 @@ object Dependencies { libraryDependencies ++= Seq( "com.typesafe.akka" %% "akka-http" % AkkaHttpVersion, "com.typesafe.akka" %% "akka-http-xml" % AkkaHttpVersion, - // in-memory filesystem for file related tests - "com.google.jimfs" % "jimfs" % "1.3.0" % Test, // ApacheV2 + // for testing authorization signature + "com.azure" % "azure-storage-common" % "12.26.1" % Test, wiremock ) ) From e770b5393aea65d5541bdf1dd433e4ab47924c41 Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 4 Sep 2024 07:28:20 -0400 Subject: [PATCH 64/78] Fix for multiple content length header #3253 1. Filter WireMock request for multiple content-length headers 2. fix typo in test --- .../scaladsl/AzuriteIntegrationSpec.scala | 2 +- .../scaladsl/StorageWireMockBase.scala | 31 ++++++++++++++++++- 2 files changed, 31 insertions(+), 2 deletions(-) diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala index 76037aca2b..f096630656 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala @@ -16,7 +16,7 @@ import scala.concurrent.duration._ import scala.concurrent.Await // TODO: investigate how Azurite works, it is not even working with pure Java API -// `putBlob` operations fails with "remature end of file." +// `putBlob` operations fails with "Premature end of file." error with BadRequest. @Ignore class AzuriteIntegrationSpec extends StorageIntegrationSpec with ForAllTestContainer { diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala index 3eee9abbf3..29d9dda24a 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala @@ -16,7 +16,13 @@ import com.github.tomakehurst.wiremock.WireMockServer import com.github.tomakehurst.wiremock.client.WireMock import com.github.tomakehurst.wiremock.client.WireMock._ import com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig -import com.github.tomakehurst.wiremock.stubbing.StubMapping +import com.github.tomakehurst.wiremock.extension.requestfilter.{ + RequestFilterAction, + RequestWrapper, + StubRequestFilterV2 +} +import com.github.tomakehurst.wiremock.http.Request +import com.github.tomakehurst.wiremock.stubbing.{ServeEvent, StubMapping} import com.typesafe.config.{Config, ConfigFactory} abstract class StorageWireMockBase(_system: ActorSystem, val _wireMockServer: WireMockServer) extends TestKit(_system) { @@ -194,6 +200,7 @@ object StorageWireMockBase { def initServer(): WireMockServer = { val server = new WireMockServer( wireMockConfig() + .extensions(new RemoveDuplicateContentLengthHeader()) .dynamicPort() ) server.start() @@ -221,4 +228,26 @@ object StorageWireMockBase { | } |} |""".stripMargin) + + private class RemoveDuplicateContentLengthHeader extends StubRequestFilterV2 { + override def filter(request: Request, serveEvent: ServeEvent): RequestFilterAction = { + val headerName = `Content-Length`.name + + val updatedRequest = + Option(request.header(headerName)).map(_.firstValue()) match { + case Some(contentLengthValue) => + RequestWrapper + .create() + .removeHeader(headerName) + .addHeader(headerName, contentLengthValue) + .wrap(request) + + case None => request + } + + RequestFilterAction.continueWith(updatedRequest) + } + + override def getName: String = "remove-duplicate-content-length-headers" + } } From 1cbb77ee1e77a45ebc26da5e38d2d12c4c90ed6c Mon Sep 17 00:00:00 2001 From: sfali Date: Thu, 5 Sep 2024 09:59:10 -0400 Subject: [PATCH 65/78] Clean up duplicate code #3253 1. Move logic to execute rest call to generic `handleRequest` function 2. Introduce functions each operation and delegate call to `handleRequest` 3. Service APIs to call explicit function --- .../storage/impl/AzureStorageStream.scala | 153 ++++++++---------- .../azure/storage/javadsl/BlobService.scala | 4 +- .../azure/storage/javadsl/FileService.scala | 4 +- .../azure/storage/scaladsl/BlobService.scala | 4 +- .../azure/storage/scaladsl/FileService.scala | 4 +- 5 files changed, 72 insertions(+), 97 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index 91c81b1d7a..aebbdadc1c 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -38,7 +38,6 @@ import akka.stream.scaladsl.{Flow, RetryFlow, Source} import akka.util.ByteString import java.time.Clock -import scala.collection.immutable import scala.concurrent.{Future, Promise} import scala.util.{Failure, Success, Try} @@ -70,120 +69,102 @@ object AzureStorageStream { } .mapMaterializedValue(_.flatMap(identity)(ExecutionContexts.parasitic)) - private[storage] def getObjectProperties(storageType: String, - objectPath: String, - requestBuilder: GetProperties): Source[Option[ObjectMetadata], NotUsed] = - Source - .fromMaterializer { (mat, attr) => - implicit val system: ActorSystem = mat.system - import mat.executionContext - val settings = resolveSettings(attr, system) - val request = requestBuilder.createRequest(settings, storageType, objectPath) - signAndRequest(request, settings) - .flatMapConcat { - case HttpResponse(OK, headers, entity, _) => - Source.future( - entity.withoutSizeLimit().discardBytes().future().map(_ => Some(computeMetaData(headers, entity))) - ) - case HttpResponse(NotFound, _, entity, _) => - Source.future(entity.discardBytes().future().map(_ => None)(ExecutionContexts.parasitic)) - case response: HttpResponse => Source.future(unmarshalError(response.status, response.entity)) - } - } - .mapMaterializedValue(_ => NotUsed) + private[storage] def getBlobProperties(objectPath: String, + requestBuilder: GetProperties): Source[Option[ObjectMetadata], NotUsed] = + handleRequest(successCode = OK, storageType = BlobType, objectPath = objectPath, requestBuilder = requestBuilder) - private[storage] def deleteObject(storageType: String, - objectPath: String, - requestBuilder: RequestBuilder): Source[Option[ObjectMetadata], NotUsed] = - Source - .fromMaterializer { (mat, attr) => - implicit val system: ActorSystem = mat.system - import mat.executionContext - val settings = resolveSettings(attr, system) - val request = requestBuilder.createRequest(settings, storageType, objectPath) - signAndRequest(request, settings) - .flatMapConcat { - case HttpResponse(Accepted, headers, entity, _) => - Source.future( - entity.withoutSizeLimit().discardBytes().future().map(_ => Some(computeMetaData(headers, entity))) - ) - case HttpResponse(NotFound, _, entity, _) => - Source.future(entity.discardBytes().future().map(_ => None)(ExecutionContexts.parasitic)) - case response: HttpResponse => Source.future(unmarshalError(response.status, response.entity)) - } - } - .mapMaterializedValue(_ => NotUsed) + private[storage] def getFileProperties(objectPath: String, + requestBuilder: GetProperties): Source[Option[ObjectMetadata], NotUsed] = + handleRequest(successCode = OK, storageType = FileType, objectPath = objectPath, requestBuilder = requestBuilder) + + private[storage] def deleteBlob(objectPath: String, + requestBuilder: RequestBuilder): Source[Option[ObjectMetadata], NotUsed] = + handleRequest(successCode = Accepted, + storageType = BlobType, + objectPath = objectPath, + requestBuilder = requestBuilder) + + private[storage] def deleteFile(objectPath: String, + requestBuilder: RequestBuilder): Source[Option[ObjectMetadata], NotUsed] = + handleRequest(successCode = Accepted, + storageType = FileType, + objectPath = objectPath, + requestBuilder = requestBuilder) private[storage] def putBlob(objectPath: String, requestBuilder: RequestBuilder, maybeHttpEntity: Option[MessageEntity]): Source[Option[ObjectMetadata], NotUsed] = - putRequest(objectPath = objectPath, - storageType = BlobType, - requestBuilder = requestBuilder, - maybeHttpEntity = maybeHttpEntity) + handleRequest(successCode = Created, + storageType = BlobType, + objectPath = objectPath, + requestBuilder = requestBuilder, + maybeHttpEntity = maybeHttpEntity) private[storage] def createFile(objectPath: String, requestBuilder: CreateFile): Source[Option[ObjectMetadata], NotUsed] = - putRequest(objectPath = objectPath, storageType = FileType, requestBuilder = requestBuilder) + handleRequest(successCode = Created, + storageType = FileType, + objectPath = objectPath, + requestBuilder = requestBuilder) private[storage] def updateRange(objectPath: String, httpEntity: MessageEntity, requestBuilder: UpdateFileRange): Source[Option[ObjectMetadata], NotUsed] = - putRequest( - objectPath = objectPath, + handleRequest( + successCode = Created, storageType = FileType, + objectPath = objectPath, requestBuilder = requestBuilder, maybeHttpEntity = Some(httpEntity) ) private[storage] def clearRange(objectPath: String, requestBuilder: ClearFileRange): Source[Option[ObjectMetadata], NotUsed] = - putRequest(objectPath = objectPath, storageType = FileType, requestBuilder = requestBuilder) + handleRequest(successCode = Created, + storageType = FileType, + objectPath = objectPath, + requestBuilder = requestBuilder) private[storage] def createContainer(objectPath: String, requestBuilder: CreateContainer): Source[Option[ObjectMetadata], NotUsed] = - putRequest(objectPath = objectPath, storageType = BlobType, requestBuilder = requestBuilder) + handleRequest(successCode = Created, + storageType = BlobType, + objectPath = objectPath, + requestBuilder = requestBuilder) /** - *Common function for "PUT" request where we don't expect response body. + * Common function to handle all requests where we don't expect response body. * - * @param objectPath path of the object. + * @param successCode status code for successful response * @param storageType storage type + * @param objectPath path of the object. * @param requestBuilder request builder * @param maybeHttpEntity optional http entity * @return Source with metadata containing response headers */ - private def putRequest(objectPath: String, - storageType: String, - requestBuilder: RequestBuilder, - maybeHttpEntity: Option[MessageEntity] = None): Source[Option[ObjectMetadata], NotUsed] = { + private def handleRequest( + successCode: StatusCode, + storageType: String, + objectPath: String, + requestBuilder: RequestBuilder, + maybeHttpEntity: Option[MessageEntity] = None + ): Source[Option[ObjectMetadata], NotUsed] = Source .fromMaterializer { (mat, attr) => implicit val system: ActorSystem = mat.system + import system.dispatcher val settings = resolveSettings(attr, system) val httpEntity = maybeHttpEntity.getOrElse(HttpEntity.Empty) val request = requestBuilder.createRequest(settings, storageType, objectPath).withEntity(httpEntity) - handlePutRequest(request, settings) + signAndRequest(request, settings).flatMapConcat { + case HttpResponse(sc, h, entity, _) if sc == successCode => + Source.future(entity.withoutSizeLimit().discardBytes().future().map(_ => Some(computeMetaData(h, entity)))) + case HttpResponse(NotFound, _, entity, _) => + Source.future(entity.withoutSizeLimit().discardBytes().future().map(_ => None)(ExecutionContexts.parasitic)) + case response: HttpResponse => Source.future(unmarshalError(response.status, response.entity)) + } } .mapMaterializedValue(_ => NotUsed) - } - - private def handlePutRequest(request: HttpRequest, settings: StorageSettings)(implicit system: ActorSystem) = { - import system.dispatcher - signAndRequest(request, settings).flatMapConcat { - case HttpResponse(Created, h, entity, _) => - Source.future(entity.discardBytes().future().map { _ => - val contentLengthHeader = `Content-Length` - .parseFromValueString(entity.contentLengthOption.getOrElse(0L).toString) - .map(Seq(_)) - .getOrElse(Nil) - Some(ObjectMetadata(h ++ contentLengthHeader)) - }) - case HttpResponse(NotFound, _, entity, _) => - Source.future(entity.discardBytes().future().map(_ => None)(ExecutionContexts.parasitic)) - case response: HttpResponse => Source.future(unmarshalError(response.status, response.entity)) - } - } private def signAndRequest( request: HttpRequest, @@ -195,7 +176,8 @@ object AzureStorageStream { val retriableFlow = Flow[HttpRequest] .mapAsync(parallelism = 1)( req => - singleRequest(req) + Http() + .singleRequest(req) .map(Success.apply) .recover[Try[HttpResponse]] { case t => @@ -220,24 +202,17 @@ object AzureStorageStream { .mapAsync(1)(Future.fromTry) } - private def singleRequest(request: HttpRequest)(implicit system: ActorSystem) = - Http().singleRequest(request) - - private def computeMetaData(headers: immutable.Seq[HttpHeader], entity: ResponseEntity): ObjectMetadata = { - val contentLengthHeader: Seq[HttpHeader] = `Content-Length` + private def computeMetaData(headers: Seq[HttpHeader], entity: ResponseEntity): ObjectMetadata = { + val contentLengthHeader = `Content-Length` .parseFromValueString(entity.contentLengthOption.getOrElse(0L).toString) .map(Seq(_)) .getOrElse(Nil) - val contentTypeHeader: Seq[HttpHeader] = `Content-Type` + val contentTypeHeader = `Content-Type` .parseFromValueString(entity.contentType.value) .map(Seq(_)) .getOrElse(Nil) ObjectMetadata( - headers ++ - contentLengthHeader ++ contentTypeHeader ++ - immutable.Seq( - CustomContentTypeHeader(entity.contentType) - ) + headers ++ contentLengthHeader ++ contentTypeHeader ++ Seq(CustomContentTypeHeader(entity.contentType)) ) } diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala index 7bab23a627..bd1ce927c9 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala @@ -56,7 +56,7 @@ object BlobService { */ def getProperties(objectPath: String, requestBuilder: GetProperties): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .getObjectProperties(BlobType, objectPath, requestBuilder) + .getBlobProperties(objectPath, requestBuilder) .map(opt => Optional.ofNullable(opt.orNull)) .asJava @@ -70,7 +70,7 @@ object BlobService { */ def deleteBlob(objectPath: String, requestBuilder: DeleteFile): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .deleteObject(BlobType, objectPath, requestBuilder) + .deleteBlob(objectPath, requestBuilder) .map(opt => Optional.ofNullable(opt.orNull)) .asJava diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala index 134afa0345..f53b0aaca7 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala @@ -56,7 +56,7 @@ object FileService { */ def getProperties(objectPath: String, requestBuilder: GetProperties): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .getObjectProperties(FileType, objectPath, requestBuilder) + .getFileProperties(objectPath, requestBuilder) .map(opt => Optional.ofNullable(opt.orNull)) .asJava @@ -70,7 +70,7 @@ object FileService { */ def deleteFile(objectPath: String, requestBuilder: DeleteFile): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream - .deleteObject(FileType, objectPath, requestBuilder) + .deleteFile(objectPath, requestBuilder) .map(opt => Optional.ofNullable(opt.orNull)) .asJava diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala index 20733642f5..e4b508a848 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala @@ -49,7 +49,7 @@ object BlobService { * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ def getProperties(objectPath: String, requestBuilder: GetProperties): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.getObjectProperties(BlobType, objectPath, requestBuilder) + AzureStorageStream.getBlobProperties(objectPath, requestBuilder) /** * Deletes blob. @@ -60,7 +60,7 @@ object BlobService { * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ def deleteBlob(objectPath: String, requestBuilder: DeleteBlob): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.deleteObject(BlobType, objectPath, requestBuilder) + AzureStorageStream.deleteBlob(objectPath, requestBuilder) /** * Put Block blob. diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala index 4624b7f736..dbf099008f 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala @@ -48,7 +48,7 @@ object FileService { * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ def getProperties(objectPath: String, requestBuilder: GetProperties): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.getObjectProperties(FileType, objectPath, requestBuilder) + AzureStorageStream.getFileProperties(objectPath, requestBuilder) /** * Deletes file. @@ -59,7 +59,7 @@ object FileService { * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist */ def deleteFile(objectPath: String, requestBuilder: DeleteFile): Source[Option[ObjectMetadata], NotUsed] = - AzureStorageStream.deleteObject(FileType, objectPath, requestBuilder) + AzureStorageStream.deleteFile(objectPath, requestBuilder) /** * Creates a file. From da8d82ff33e829bd28b3f156757291a7bf0a71c3 Mon Sep 17 00:00:00 2001 From: sfali Date: Sat, 7 Sep 2024 16:29:55 -0400 Subject: [PATCH 66/78] Add more test #3253 --- .../test/java/docs/javadsl/StorageTest.java | 11 +-- .../scaladsl/StorageWireMockBase.scala | 37 +++++++++- .../scala/docs/scaladsl/StorageSpec.scala | 68 ++++++++++++++++++- 3 files changed, 107 insertions(+), 9 deletions(-) diff --git a/azure-storage/src/test/java/docs/javadsl/StorageTest.java b/azure-storage/src/test/java/docs/javadsl/StorageTest.java index e8c844fa6b..dba0e62e33 100644 --- a/azure-storage/src/test/java/docs/javadsl/StorageTest.java +++ b/azure-storage/src/test/java/docs/javadsl/StorageTest.java @@ -35,6 +35,7 @@ import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; +import scala.Option; import java.util.List; import java.util.Optional; @@ -128,7 +129,7 @@ public void putAppendBlob() throws Exception { //#put-append-blob final Source, NotUsed> source = BlobService.putAppendBlock(containerName() + "/" + blobName(), - PutAppendBlock.create(ContentTypes.TEXT_PLAIN_UTF8)); + PutAppendBlock.create(ContentTypes.TEXT_PLAIN_UTF8)); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); //#put-append-blob @@ -139,7 +140,7 @@ public void putAppendBlob() throws Exception { @Test public void getBlob() throws Exception { - mockGetBlob(); + mockGetBlob(Option.empty(), Option.empty()); //#get-blob final Source> source = @@ -213,7 +214,7 @@ public void createFile() throws Exception { //#create-file final Source, NotUsed> source = FileService.createFile(containerName() + "/" + blobName(), - CreateFile.create(contentLength(), ContentTypes.TEXT_PLAIN_UTF8)); + CreateFile.create(contentLength(), ContentTypes.TEXT_PLAIN_UTF8)); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); //#create-file @@ -237,7 +238,7 @@ public void updateRange() throws Exception { final Source, NotUsed> source = FileService.updateRange(containerName() + "/" + blobName(), UpdateFileRange.create(contentRange(), ContentTypes.TEXT_PLAIN_UTF8), - Source.single(ByteString.fromString(payload()))); + Source.single(ByteString.fromString(payload()))); final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); //#update-range @@ -251,7 +252,7 @@ public void updateRange() throws Exception { @Test public void getFile() throws Exception { - mockGetBlob(); + mockGetBlob(Option.empty(), Option.empty()); //#get-file final Source> source = diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala index 29d9dda24a..416f0226da 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala @@ -25,6 +25,8 @@ import com.github.tomakehurst.wiremock.http.Request import com.github.tomakehurst.wiremock.stubbing.{ServeEvent, StubMapping} import com.typesafe.config.{Config, ConfigFactory} +import scala.util.Try + abstract class StorageWireMockBase(_system: ActorSystem, val _wireMockServer: WireMockServer) extends TestKit(_system) { private val port = _wireMockServer.port() @@ -102,9 +104,25 @@ abstract class StorageWireMockBase(_system: ActorSystem, val _wireMockServer: Wi ) ) - protected def mockGetBlob(): StubMapping = + protected def mockGetBlob(versionId: Option[String] = None, leaseId: Option[String] = None): StubMapping = mock.register( - get(urlEqualTo(s"/$AccountName/$containerName/$blobName")) + get(urlPathEqualTo(s"/$AccountName/$containerName/$blobName")) + .withQueryParam("versionId", toStringValuePattern(versionId)) + .withHeader(LeaseIdHeaderKey, toStringValuePattern(leaseId)) + .willReturn( + aResponse() + .withStatus(200) + .withHeader(ETag.name, ETagValue) + .withBody(payload) + ) + ) + + protected def mockGetBlobWithServerSideEncryption(): StubMapping = + mock.register( + get(urlPathEqualTo(s"/$AccountName/$containerName/$blobName")) + .withHeader("x-ms-encryption-algorithm", equalTo("AES256")) + .withHeader("x-ms-encryption-key", equalTo("SGVsbG9Xb3JsZA==")) + .withHeader("x-ms-encryption-key-sha256", equalTo("hy5OUM6ZkNiwQTMMR8nd0Rvsa1A66ThqmdqFhOm7EsQ=")) .willReturn( aResponse() .withStatus(200) @@ -188,6 +206,19 @@ abstract class StorageWireMockBase(_system: ActorSystem, val _wireMockServer: Wi ) ) + protected def mock404s(): StubMapping = + mock.register( + any(anyUrl()) + .willReturn(aResponse().withStatus(404).withBody(""" + | + | ResourceNotFound + | The specified resource doesn't exist. + | + |""".stripMargin)) + ) + + private def toStringValuePattern(maybeValue: Option[String]) = maybeValue.map(equalTo).getOrElse(absent()) + private def stopWireMockServer(): Unit = _wireMockServer.stop() } @@ -234,7 +265,7 @@ object StorageWireMockBase { val headerName = `Content-Length`.name val updatedRequest = - Option(request.header(headerName)).map(_.firstValue()) match { + Try(request.getHeader(headerName)).toOption match { case Some(contentLengthValue) => RequestWrapper .create() diff --git a/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala b/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala index 4201acd6e7..d80888b839 100644 --- a/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala +++ b/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala @@ -5,7 +5,9 @@ package docs.scaladsl import akka.NotUsed -import akka.http.scaladsl.model.ContentTypes +import akka.http.scaladsl.model.{ContentTypes, StatusCodes} +import akka.stream.alpakka.azure.storage.StorageException +import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption import akka.stream.alpakka.azure.storage.requests._ import akka.stream.alpakka.azure.storage.scaladsl.StorageWireMockBase import akka.stream.alpakka.azure.storage.scaladsl.StorageWireMockBase.ETagRawValue @@ -149,6 +151,70 @@ class StorageSpec eventualText.futureValue.map(_.utf8String).mkString("") shouldBe payload } + "get blob with versionId" in { + mockGetBlob(Some("versionId")) + + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[ByteString, Future[ObjectMetadata]] = + BlobService.getBlob(objectPath = s"$containerName/$blobName", GetBlob().withVersionId("versionId")) + + val eventualText = source.toMat(Sink.seq)(Keep.right).run() + + eventualText.futureValue.map(_.utf8String).mkString("") shouldBe payload + } + + "get blob with optional header" in { + mockGetBlob(leaseId = Some("leaseId")) + + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[ByteString, Future[ObjectMetadata]] = + BlobService.getBlob(objectPath = s"$containerName/$blobName", GetBlob().withLeaseId("leaseId")) + + val eventualText = source.toMat(Sink.seq)(Keep.right).run() + + eventualText.futureValue.map(_.utf8String).mkString("") shouldBe payload + } + + "get blob with ServerSideEncryption" in { + mockGetBlobWithServerSideEncryption() + + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[ByteString, Future[ObjectMetadata]] = + BlobService.getBlob(objectPath = s"$containerName/$blobName", + GetBlob().withServerSideEncryption(ServerSideEncryption.customerKey("SGVsbG9Xb3JsZA=="))) + + val eventualText = source.toMat(Sink.seq)(Keep.right).run() + + eventualText.futureValue.map(_.utf8String).mkString("") shouldBe payload + } + + "get blob from non-existing container" in { + mock404s() + + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + + val source: Source[ByteString, Future[ObjectMetadata]] = + BlobService.getBlob(objectPath = s"$containerName/$blobName", GetBlob()) + + val eventualMetadata = source.toMat(Sink.seq)(Keep.right).run() + eventualMetadata.failed.futureValue shouldBe + StorageException( + statusCode = StatusCodes.NotFound, + errorCode = "ResourceNotFound", + errorMessage = "The specified resource doesn't exist.", + resourceName = None, + resourceValue = None, + reason = None + ) + } + "get blob range" in { mockGetBlobWithRange() From 9fb81d16f5cd762174eeebdeb4cf5d4bda43af26 Mon Sep 17 00:00:00 2001 From: sfali Date: Sat, 7 Sep 2024 16:36:17 -0400 Subject: [PATCH 67/78] remove support for SharedKeyLite #3253 --- azure-storage/src/main/resources/reference.conf | 2 +- .../stream/alpakka/azure/storage/impl/auth/Signer.scala | 6 +----- .../scala/akka/stream/alpakka/azure/storage/package.scala | 1 - .../scala/akka/stream/alpakka/azure/storage/settings.scala | 3 +-- docs/src/main/paradox/azure-storage.md | 2 +- 5 files changed, 4 insertions(+), 10 deletions(-) diff --git a/azure-storage/src/main/resources/reference.conf b/azure-storage/src/main/resources/reference.conf index 32d29afe59..8c6ab04030 100644 --- a/azure-storage/src/main/resources/reference.conf +++ b/azure-storage/src/main/resources/reference.conf @@ -9,7 +9,7 @@ alpakka { #azure-credentials credentials { - # valid values are anon (annonymous), SharedKey, SharedKeyLite, sas + # valid values are anon (annonymous), SharedKey, and sas authorization-type = anon authorization-type = ${?AZURE_STORAGE_AUTHORIZATION_TYPE} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala index 53264fdc1a..a716aca505 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/auth/Signer.scala @@ -98,9 +98,7 @@ final case class Signer(initialRequest: HttpRequest, settings: StorageSettings)( private[auth] def generateAuthorizationHeader: String = { import Signer._ val authorizationType = settings.authorizationType - val headersToSign = - if (authorizationType == SharedKeyLiteAuthorizationType) buildHeadersToSign(SharedKeyLiteHeaders) - else buildHeadersToSign(SharedKeyHeaders) + val headersToSign = buildHeadersToSign(SharedKeyHeaders) val signature = Base64.getEncoder.encodeToString(mac.doFinal(headersToSign.mkString(NewLine).getBytes)) s"$authorizationType ${credential.accountName}:$signature" } @@ -121,6 +119,4 @@ object Signer { `If-Unmodified-Since`.name, Range.name ) - - private val SharedKeyLiteHeaders = Seq("Content-MD5", `Content-Type`.name, Date.name) } diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala index a39e98e821..8cb17d77b0 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/package.scala @@ -26,7 +26,6 @@ package object storage { private[storage] val PageBlobSequenceNumberHeaderKey = "x-ms-blob-sequence-number" private[storage] val AnonymousAuthorizationType = "anon" private[storage] val SharedKeyAuthorizationType = "SharedKey" - private[storage] val SharedKeyLiteAuthorizationType = "SharedKeyLite" private[storage] val SasAuthorizationType = "sas" private[storage] val BlobType = "blob" private[storage] val FileType = "file" diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala index 3e3fef9247..45fcde8ff6 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/settings.scala @@ -114,8 +114,7 @@ final class StorageSettings(val apiVersion: String, object StorageSettings { private[storage] val ConfigPath = "alpakka.azure-storage" - private val AuthorizationTypes = - Seq(AnonymousAuthorizationType, SharedKeyAuthorizationType, SharedKeyLiteAuthorizationType, SasAuthorizationType) + private val AuthorizationTypes = Seq(AnonymousAuthorizationType, SharedKeyAuthorizationType, SasAuthorizationType) def apply( apiVersion: String, diff --git a/docs/src/main/paradox/azure-storage.md b/docs/src/main/paradox/azure-storage.md index ff976d7918..913405f026 100644 --- a/docs/src/main/paradox/azure-storage.md +++ b/docs/src/main/paradox/azure-storage.md @@ -51,7 +51,7 @@ Java At minimum following configurations needs to be set: -* `authorization-type`, this is the type of authorization to use as described [here](https://learn.microsoft.com/en-us/rest/api/storageservices/authorize-requests-to-azure-storage), possible values are `anon`, `SharedKey`, `SharedKeyLite`, or `sas`. Environment variable `AZURE_STORAGE_AUTHORIZATION_TYPE` can be set to override this configuration. +* `authorization-type`, this is the type of authorization to use as described [here](https://learn.microsoft.com/en-us/rest/api/storageservices/authorize-requests-to-azure-storage), possible values are `anon`, `SharedKey`, or `sas`. Environment variable `AZURE_STORAGE_AUTHORIZATION_TYPE` can be set to override this configuration. * `account-name`, this is the name of the blob storage or file share. Environment variable `AZURE_STORAGE_ACCOUNT_NAME` can be set to override this configuration. * `account-key`, Account key to use to create authorization signature, mandatory for `SharedKey` or `SharedKeyLite` authorization types, as described [here](https://learn.microsoft.com/en-us/rest/api/storageservices/authorize-with-shared-key). Environment variable `AZURE_STORAGE_ACCOUNT_KEY` can be set to override this configuration. * `sas-token` if authorization type is `sas`. Environment variable `AZURE_STORAGE_SAS_TOKEN` can be set to override this configuration. From b18fd50deda473a0723c08acac95cf05b32a78f3 Mon Sep 17 00:00:00 2001 From: Syed Ali Date: Mon, 9 Sep 2024 11:03:02 -0400 Subject: [PATCH 68/78] Update azure-storage/src/test/resources/logback-test.xml Co-authored-by: Shubham Girdhar --- azure-storage/src/test/resources/logback-test.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-storage/src/test/resources/logback-test.xml b/azure-storage/src/test/resources/logback-test.xml index 0dd4b9f59c..190a393e9b 100644 --- a/azure-storage/src/test/resources/logback-test.xml +++ b/azure-storage/src/test/resources/logback-test.xml @@ -1,6 +1,6 @@ - target/s3.log + target/azure-blob-storage.log false %d{ISO8601} %-5level [%thread] [%logger{36}] %msg%n From 67c2eaa3f59b5a08abfe863443732b2355006e54 Mon Sep 17 00:00:00 2001 From: Syed Ali Date: Mon, 9 Sep 2024 11:03:25 -0400 Subject: [PATCH 69/78] Update azure-storage/src/main/resources/reference.conf Co-authored-by: Shubham Girdhar --- azure-storage/src/main/resources/reference.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-storage/src/main/resources/reference.conf b/azure-storage/src/main/resources/reference.conf index 8c6ab04030..75aab3ad98 100644 --- a/azure-storage/src/main/resources/reference.conf +++ b/azure-storage/src/main/resources/reference.conf @@ -27,7 +27,7 @@ alpakka { } #azure-credentials - # Default settings corresponding to automatic retry of requests in an S3 stream. + # Default settings corresponding to automatic retry of requests in an Azure Blob Storage stream. retry-settings { # The maximum number of additional attempts (following transient errors) that will be made to process a given # request before giving up. From 4cfe27a14ee2ad06757670d6bbd75b59a975a184 Mon Sep 17 00:00:00 2001 From: Syed Ali Date: Mon, 9 Sep 2024 11:03:40 -0400 Subject: [PATCH 70/78] Update azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala Co-authored-by: Shubham Girdhar --- .../akka/stream/alpakka/azure/storage/StorageAttributes.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala index b5890bdac6..1373c8851b 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala @@ -15,7 +15,7 @@ import akka.stream.Attributes.Attribute object StorageAttributes { /** - * Settings to use for the S3 stream + * Settings to use for the Azure Blob Storage stream */ def settings(settings: StorageSettings): Attributes = Attributes(StorageSettingsValue(settings)) From f5971a1eb55d6341dc0a6fffe1a96428497d3ded Mon Sep 17 00:00:00 2001 From: Syed Ali Date: Mon, 9 Sep 2024 11:03:53 -0400 Subject: [PATCH 71/78] Update azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala Co-authored-by: Shubham Girdhar --- .../stream/alpakka/azure/storage/impl/AzureStorageStream.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index aebbdadc1c..7a430e800b 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -217,7 +217,7 @@ object AzureStorageStream { } private def isTransientError(status: StatusCode): Boolean = { - // 5xx errors from S3 can be treated as transient. + // 5xx errors from Azure Blob Storage can be treated as transient. status.intValue >= 500 } From 1eaf5df14f46fb5764e20ca15c982b6bf6166a49 Mon Sep 17 00:00:00 2001 From: Syed Ali Date: Mon, 9 Sep 2024 11:04:15 -0400 Subject: [PATCH 72/78] Update azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala Co-authored-by: Shubham Girdhar --- .../main/scala/akka/stream/alpakka/azure/storage/models.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala index c19d44f3f9..c7e3de26f2 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/models.scala @@ -31,7 +31,7 @@ object AzureNameKeyCredential { } } -/** Modelled after ObjectMetadata in S3. +/** Modelled after BlobProperties in Azure Blob Storage. * * @param metadata * Raw Http headers From 18f43364dc2ef13bd220b4c54a2f17cec00029cd Mon Sep 17 00:00:00 2001 From: Syed Ali Date: Tue, 10 Sep 2024 10:45:50 -0400 Subject: [PATCH 73/78] Update azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala Co-authored-by: Shubham Girdhar --- .../akka/stream/alpakka/azure/storage/StorageAttributes.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala index 1373c8851b..82bd6c4c85 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala @@ -31,6 +31,7 @@ object StorageAttributes { } final class StorageSettingsPath private (val path: String) extends Attribute + object StorageSettingsPath { val Default: StorageSettingsPath = StorageSettingsPath(StorageSettings.ConfigPath) From fcd4326c5383a2754e7bd7b529b6079c26485cbc Mon Sep 17 00:00:00 2001 From: Syed Ali Date: Tue, 10 Sep 2024 10:46:05 -0400 Subject: [PATCH 74/78] Update azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala Co-authored-by: Shubham Girdhar --- .../akka/stream/alpakka/azure/storage/StorageAttributes.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala index 82bd6c4c85..7d1918b51c 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageAttributes.scala @@ -39,6 +39,7 @@ object StorageSettingsPath { } final class StorageSettingsValue private (val settings: StorageSettings) extends Attribute + object StorageSettingsValue { def apply(settings: StorageSettings) = new StorageSettingsValue(settings) } From 948db31731ec11b14d01aa4537601bf038c2e25a Mon Sep 17 00:00:00 2001 From: sfali Date: Tue, 10 Sep 2024 12:06:12 -0400 Subject: [PATCH 75/78] Fail safe error message #3253 1. Make sure we don't emit empty string 2. correct configuration --- .../stream/alpakka/azure/storage/StorageException.scala | 5 +++-- azure-storage/src/test/resources/application.conf | 6 ++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageException.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageException.scala index 20eb706a32..40a930d85c 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageException.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/StorageException.scala @@ -64,10 +64,11 @@ object StorageException { ) } match { case Failure(ex) => + val errorMessage = emptyStringToOption(ex.getMessage) StorageException( statusCode = statusCode, - errorCode = Option(ex.getMessage).getOrElse("null"), - errorMessage = Option(response).getOrElse("null"), + errorCode = errorMessage.getOrElse("null"), + errorMessage = emptyStringToOption(response).orElse(errorMessage).getOrElse("null"), resourceName = None, resourceValue = None, reason = None diff --git a/azure-storage/src/test/resources/application.conf b/azure-storage/src/test/resources/application.conf index 29bcdf4dcd..1fe23177ea 100644 --- a/azure-storage/src/test/resources/application.conf +++ b/azure-storage/src/test/resources/application.conf @@ -13,10 +13,8 @@ azurite { akka { http { - client { - parsing { - max-response-reason-length = 512 - } + parsing { + max-response-reason-length = 512 } } } From 689cd533d8e8e89aa76331cdf35a14ec2144d6dd Mon Sep 17 00:00:00 2001 From: sfali Date: Tue, 10 Sep 2024 22:25:39 -0400 Subject: [PATCH 76/78] Delete Container support #3253 1. Request builder for delete container 2. Implementation of delete container and Scala and Java API 3. Tests for delete container 4. Documentation --- .../storage/impl/AzureStorageStream.scala | 8 +++ .../azure/storage/javadsl/BlobService.scala | 12 +++++ .../storage/requests/DeleteContainer.scala | 54 +++++++++++++++++++ .../azure/storage/scaladsl/BlobService.scala | 12 +++++ .../test/java/docs/javadsl/StorageTest.java | 17 ++++++ .../scaladsl/AzuriteIntegrationSpec.scala | 10 ---- .../scaladsl/StorageIntegrationSpec.scala | 40 +++++++++++--- .../scaladsl/StorageWireMockBase.scala | 10 ++++ .../scala/docs/scaladsl/StorageSpec.scala | 39 +++++++++++++- docs/src/main/paradox/azure-storage.md | 11 ++++ 10 files changed, 195 insertions(+), 18 deletions(-) create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteContainer.scala diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index 7a430e800b..bace04b460 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -30,6 +30,7 @@ import akka.stream.alpakka.azure.storage.requests.{ ClearFileRange, CreateContainer, CreateFile, + DeleteContainer, GetProperties, RequestBuilder, UpdateFileRange @@ -132,6 +133,13 @@ object AzureStorageStream { objectPath = objectPath, requestBuilder = requestBuilder) + private[storage] def deleteContainer(objectPath: String, + requestBuilder: DeleteContainer): Source[Option[ObjectMetadata], NotUsed] = + handleRequest(successCode = Accepted, + storageType = BlobType, + objectPath = objectPath, + requestBuilder = requestBuilder) + /** * Common function to handle all requests where we don't expect response body. * diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala index bd1ce927c9..266c1a4612 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/BlobService.scala @@ -12,6 +12,7 @@ import akka.http.scaladsl.model.HttpEntity import akka.stream.alpakka.azure.storage.impl.AzureStorageStream import akka.stream.alpakka.azure.storage.requests.{ CreateContainer, + DeleteContainer, DeleteFile, GetBlob, GetProperties, @@ -133,4 +134,15 @@ object BlobService { */ def createContainer(objectPath: String, requestBuilder: CreateContainer): Source[Optional[ObjectMetadata], NotUsed] = AzureStorageStream.createContainer(objectPath, requestBuilder).map(opt => Optional.ofNullable(opt.orNull)).asJava + + /** + * Delete container. + * + * @param objectPath name of the container + * @param requestBuilder builder to build deleteContainer request + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def deleteContainer(objectPath: String, requestBuilder: DeleteContainer): Source[Optional[ObjectMetadata], NotUsed] = + AzureStorageStream.deleteContainer(objectPath, requestBuilder).map(opt => Optional.ofNullable(opt.orNull)).asJava } diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteContainer.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteContainer.scala new file mode 100644 index 0000000000..4bd931124c --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteContainer.scala @@ -0,0 +1,54 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.scaladsl.model.{HttpHeader, HttpMethod, HttpMethods} +import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption +import akka.stream.alpakka.azure.storage.impl.StorageHeaders + +final class DeleteContainer(val leaseId: Option[String] = None, + override val sse: Option[ServerSideEncryption] = None, + override val additionalHeaders: Seq[HttpHeader] = Seq.empty) + extends RequestBuilder { + + override protected val method: HttpMethod = HttpMethods.DELETE + + override protected val queryParams: Map[String, String] = super.queryParams ++ Map("restype" -> "container") + + def withLeaseId(leaseId: String): DeleteContainer = copy(leaseId = Option(leaseId)) + + override def withServerSideEncryption(sse: ServerSideEncryption): DeleteContainer = copy(sse = Option(sse)) + + override def addHeader(httpHeader: HttpHeader): DeleteContainer = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + + override protected def getHeaders: Seq[HttpHeader] = + StorageHeaders() + .witServerSideEncryption(sse) + .withLeaseIdHeader(leaseId) + .withAdditionalHeaders(additionalHeaders) + .headers + + private def copy(leaseId: Option[String] = leaseId, + sse: Option[ServerSideEncryption] = sse, + additionalHeaders: Seq[HttpHeader] = additionalHeaders) = + new DeleteContainer(leaseId = leaseId, sse = sse, additionalHeaders = additionalHeaders) +} + +object DeleteContainer { + + /* + * Scala API + */ + def apply(): DeleteContainer = new DeleteContainer() + + /* + * Java API + */ + def create(): DeleteContainer = DeleteContainer() +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala index e4b508a848..8eceac1939 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/BlobService.scala @@ -13,6 +13,7 @@ import akka.stream.alpakka.azure.storage.impl.AzureStorageStream import akka.stream.alpakka.azure.storage.requests.{ CreateContainer, DeleteBlob, + DeleteContainer, GetBlob, GetProperties, PutAppendBlock, @@ -110,4 +111,15 @@ object BlobService { */ def createContainer(objectPath: String, requestBuilder: CreateContainer): Source[Option[ObjectMetadata], NotUsed] = AzureStorageStream.createContainer(objectPath, requestBuilder) + + /** + * Delete container. + * + * @param objectPath name of the container + * @param requestBuilder builder to build deleteContainer request + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def deleteContainer(objectPath: String, requestBuilder: DeleteContainer): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.deleteContainer(objectPath, requestBuilder) } diff --git a/azure-storage/src/test/java/docs/javadsl/StorageTest.java b/azure-storage/src/test/java/docs/javadsl/StorageTest.java index dba0e62e33..29cd3bfcc9 100644 --- a/azure-storage/src/test/java/docs/javadsl/StorageTest.java +++ b/azure-storage/src/test/java/docs/javadsl/StorageTest.java @@ -14,6 +14,7 @@ import akka.stream.alpakka.azure.storage.requests.ClearFileRange; import akka.stream.alpakka.azure.storage.requests.CreateContainer; import akka.stream.alpakka.azure.storage.requests.CreateFile; +import akka.stream.alpakka.azure.storage.requests.DeleteContainer; import akka.stream.alpakka.azure.storage.requests.DeleteFile; import akka.stream.alpakka.azure.storage.requests.GetBlob; import akka.stream.alpakka.azure.storage.requests.GetFile; @@ -82,6 +83,22 @@ public void createContainer() throws Exception { Assert.assertEquals(objectMetadata.getETag().get(), ETagRawValue()); } + @Test + public void deleteContainer() throws Exception { + mockDeleteContainer(); + + //#delete-container + final Source, NotUsed> source = BlobService.deleteContainer(containerName(), DeleteContainer.create()); + + final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); + //#delete-container + + final var optionalObjectMetadata = optionalCompletionStage.toCompletableFuture().get(); + Assert.assertTrue(optionalObjectMetadata.isPresent()); + final var objectMetadata = optionalObjectMetadata.get(); + Assert.assertEquals(objectMetadata.getContentLength(), 0L); + } + // TODO: There are couple of issues, firstly there are two `Content-Length` headers being added, one by `putBlob` // function and secondly by, most likely, by WireMock. Need to to figure out how to tell WireMock not to add `Content-Length` diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala index f096630656..2ffff35c23 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzuriteIntegrationSpec.scala @@ -12,9 +12,6 @@ import akka.stream.Attributes import com.dimafeng.testcontainers.ForAllTestContainer import org.scalatest.Ignore -import scala.concurrent.duration._ -import scala.concurrent.Await - // TODO: investigate how Azurite works, it is not even working with pure Java API // `putBlob` operations fails with "Premature end of file." error with BadRequest. @Ignore @@ -27,12 +24,5 @@ class AzuriteIntegrationSpec extends StorageIntegrationSpec with ForAllTestConta protected lazy val blobSettings: StorageSettings = StorageExt(system).settings("azurite").withEndPointUrl(container.getBlobHostAddress) - override protected def beforeAll(): Unit = { - super.beforeAll() - - val eventualDone = createContainer(defaultContainerName) - Await.result(eventualDone, 10.seconds) - } - override protected def getDefaultAttributes: Attributes = StorageAttributes.settings(blobSettings) } diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala index 1c23463937..b8e2243334 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala @@ -13,7 +13,14 @@ import akka.http.scaladsl.Http import akka.http.scaladsl.model.ContentTypes import akka.http.scaladsl.model.headers.ByteRange import akka.stream.Attributes -import akka.stream.alpakka.azure.storage.requests.{CreateContainer, DeleteBlob, GetBlob, GetProperties, PutBlockBlob} +import akka.stream.alpakka.azure.storage.requests.{ + CreateContainer, + DeleteBlob, + DeleteContainer, + GetBlob, + GetProperties, + PutBlockBlob +} import akka.stream.alpakka.testkit.scaladsl.LogCapturing import akka.stream.scaladsl.{Flow, Framing, Keep, Sink, Source} import akka.testkit.TestKit @@ -56,6 +63,19 @@ trait StorageIntegrationSpec protected def getDefaultAttributes: Attributes = StorageAttributes.settings(StorageSettings()) "BlobService" should { + "create container" in { + val maybeObjectMetadata = + BlobService + .createContainer(objectPath = defaultContainerName, requestBuilder = CreateContainer()) + .withAttributes(getDefaultAttributes) + .runWith(Sink.head) + .futureValue + + maybeObjectMetadata shouldBe defined + val objectMetadata = maybeObjectMetadata.get + objectMetadata.contentLength shouldBe 0L + } + "put blob" in { val maybeObjectMetadata = BlobService @@ -142,13 +162,19 @@ trait StorageIntegrationSpec maybeObjectMetadata shouldBe empty } - } - protected def createContainer(containerName: String): Future[Done] = { - BlobService - .createContainer(containerName, CreateContainer()) - .withAttributes(getDefaultAttributes) - .runWith(Sink.ignore) + "delete container" in { + val maybeObjectMetadata = + BlobService + .deleteContainer(objectPath = defaultContainerName, requestBuilder = DeleteContainer()) + .withAttributes(getDefaultAttributes) + .runWith(Sink.head) + .futureValue + + maybeObjectMetadata shouldBe defined + val objectMetadata = maybeObjectMetadata.get + objectMetadata.contentLength shouldBe 0L + } } protected def calculateDigest(text: String): String = { diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala index 416f0226da..7959d8ddbc 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala @@ -62,6 +62,16 @@ abstract class StorageWireMockBase(_system: ActorSystem, val _wireMockServer: Wi ) ) + protected def mockDeleteContainer(): StubMapping = + mock.register( + delete(urlEqualTo(s"/$AccountName/$containerName?restype=container")) + .willReturn( + aResponse() + .withStatus(202) + .withHeader(`Content-Length`.name, "0") + ) + ) + protected def mockPutBlockBlob(): StubMapping = mock.register( put(urlEqualTo(s"/$AccountName/$containerName/$blobName")) diff --git a/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala b/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala index d80888b839..a3ac4eb5cc 100644 --- a/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala +++ b/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala @@ -8,7 +8,6 @@ import akka.NotUsed import akka.http.scaladsl.model.{ContentTypes, StatusCodes} import akka.stream.alpakka.azure.storage.StorageException import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption -import akka.stream.alpakka.azure.storage.requests._ import akka.stream.alpakka.azure.storage.scaladsl.StorageWireMockBase import akka.stream.alpakka.azure.storage.scaladsl.StorageWireMockBase.ETagRawValue import akka.stream.alpakka.testkit.scaladsl.LogCapturing @@ -44,6 +43,7 @@ class StorageSpec //#create-container import akka.stream.alpakka.azure.storage.scaladsl.BlobService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.CreateContainer val source: Source[Option[ObjectMetadata], NotUsed] = BlobService.createContainer(containerName, CreateContainer()) @@ -58,6 +58,26 @@ class StorageSpec objectMetadata.eTag shouldBe Some(ETagRawValue) } + "delete container" in { + mockDeleteContainer() + + //#delete-container + import akka.stream.alpakka.azure.storage.scaladsl.BlobService + import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.DeleteContainer + + val source: Source[Option[ObjectMetadata], NotUsed] = + BlobService.deleteContainer(containerName, DeleteContainer()) + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) + //#delete-container + + val maybeObjectMetadata = eventualMaybeMetadata.futureValue + maybeObjectMetadata shouldBe defined + val objectMetadata = maybeObjectMetadata.get + objectMetadata.contentLength shouldBe 0L + } + // TODO: There are couple of issues, firstly there are two `Content-Length` headers being added, one by `putBlob` // function and secondly by, most likely, by WireMock. Need to to figure out how to tell WireMock not to add `Content-Length` // header, secondly once that resolve then we get `akka.http.scaladsl.model.EntityStreamException`. @@ -67,6 +87,7 @@ class StorageSpec //#put-block-blob import akka.stream.alpakka.azure.storage.scaladsl.BlobService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.PutBlockBlob val source: Source[Option[ObjectMetadata], NotUsed] = BlobService.putBlockBlob( @@ -94,6 +115,7 @@ class StorageSpec //#put-page-blob import akka.stream.alpakka.azure.storage.scaladsl.BlobService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.PutPageBlock val source: Source[Option[ObjectMetadata], NotUsed] = BlobService.putPageBlock( @@ -120,6 +142,7 @@ class StorageSpec //#put-append-blob import akka.stream.alpakka.azure.storage.scaladsl.BlobService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.PutAppendBlock val source: Source[Option[ObjectMetadata], NotUsed] = BlobService.putAppendBlock(objectPath = s"$containerName/$blobName", @@ -141,6 +164,7 @@ class StorageSpec //#get-blob import akka.stream.alpakka.azure.storage.scaladsl.BlobService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.GetBlob val source: Source[ByteString, Future[ObjectMetadata]] = BlobService.getBlob(objectPath = s"$containerName/$blobName", GetBlob()) @@ -156,6 +180,7 @@ class StorageSpec import akka.stream.alpakka.azure.storage.scaladsl.BlobService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.GetBlob val source: Source[ByteString, Future[ObjectMetadata]] = BlobService.getBlob(objectPath = s"$containerName/$blobName", GetBlob().withVersionId("versionId")) @@ -170,6 +195,7 @@ class StorageSpec import akka.stream.alpakka.azure.storage.scaladsl.BlobService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.GetBlob val source: Source[ByteString, Future[ObjectMetadata]] = BlobService.getBlob(objectPath = s"$containerName/$blobName", GetBlob().withLeaseId("leaseId")) @@ -184,6 +210,7 @@ class StorageSpec import akka.stream.alpakka.azure.storage.scaladsl.BlobService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.GetBlob val source: Source[ByteString, Future[ObjectMetadata]] = BlobService.getBlob(objectPath = s"$containerName/$blobName", @@ -199,6 +226,7 @@ class StorageSpec import akka.stream.alpakka.azure.storage.scaladsl.BlobService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.GetBlob val source: Source[ByteString, Future[ObjectMetadata]] = BlobService.getBlob(objectPath = s"$containerName/$blobName", GetBlob()) @@ -221,6 +249,7 @@ class StorageSpec //#get-blob-range import akka.stream.alpakka.azure.storage.scaladsl.BlobService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.GetBlob val source: Source[ByteString, Future[ObjectMetadata]] = BlobService.getBlob(objectPath = s"$containerName/$blobName", requestBuilder = GetBlob().withRange(subRange)) @@ -237,6 +266,7 @@ class StorageSpec //#get-blob-properties import akka.stream.alpakka.azure.storage.scaladsl.BlobService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.GetProperties val source: Source[Option[ObjectMetadata], NotUsed] = BlobService.getProperties(objectPath = s"$containerName/$blobName", GetProperties()) @@ -258,6 +288,7 @@ class StorageSpec //#delete-blob import akka.stream.alpakka.azure.storage.scaladsl.BlobService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.DeleteBlob val source: Source[Option[ObjectMetadata], NotUsed] = BlobService.deleteBlob(objectPath = s"$containerName/$blobName", DeleteBlob()) @@ -281,6 +312,7 @@ class StorageSpec //#create-file import akka.stream.alpakka.azure.storage.scaladsl.FileService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.CreateFile val source: Source[Option[ObjectMetadata], NotUsed] = FileService.createFile(objectPath = s"$containerName/$blobName", @@ -305,6 +337,7 @@ class StorageSpec //#update-range import akka.stream.alpakka.azure.storage.scaladsl.FileService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.UpdateFileRange val source: Source[Option[ObjectMetadata], NotUsed] = FileService.updateRange( @@ -329,6 +362,7 @@ class StorageSpec //#get-file import akka.stream.alpakka.azure.storage.scaladsl.FileService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.GetFile val source: Source[ByteString, Future[ObjectMetadata]] = FileService.getFile(objectPath = s"$containerName/$blobName", GetFile()) @@ -345,6 +379,7 @@ class StorageSpec //#get-file-properties import akka.stream.alpakka.azure.storage.scaladsl.FileService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.GetProperties val source: Source[Option[ObjectMetadata], NotUsed] = FileService.getProperties(objectPath = s"$containerName/$blobName", GetProperties()) @@ -369,6 +404,7 @@ class StorageSpec //#clear-range import akka.stream.alpakka.azure.storage.scaladsl.FileService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.ClearFileRange val source: Source[Option[ObjectMetadata], NotUsed] = FileService.clearRange(objectPath = s"$containerName/$blobName", requestBuilder = ClearFileRange(subRange)) @@ -389,6 +425,7 @@ class StorageSpec //#delete-file import akka.stream.alpakka.azure.storage.scaladsl.FileService import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.DeleteFile val source: Source[Option[ObjectMetadata], NotUsed] = FileService.deleteFile(objectPath = s"$containerName/$blobName", DeleteFile()) diff --git a/docs/src/main/paradox/azure-storage.md b/docs/src/main/paradox/azure-storage.md index 913405f026..365d631dd2 100644 --- a/docs/src/main/paradox/azure-storage.md +++ b/docs/src/main/paradox/azure-storage.md @@ -125,6 +125,17 @@ Scala Java : @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #create-container } +### Create Container + +The [`Delete Container`](https://learn.microsoft.com/en-us/rest/api/storageservices/delete-container) operation creates existing container under the specified account. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #delete-container } + +Java +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #delete-container } + + ### Put Block Blob The [`Put Block Blob`](https://learn.microsoft.com/en-us/rest/api/storageservices/put-blob) operation creates a new block or updates the content of an existing block blob. From d6d9ed05d17cd57212a2be74fbc728f114e1cfa1 Mon Sep 17 00:00:00 2001 From: sfali Date: Wed, 11 Sep 2024 11:05:50 -0400 Subject: [PATCH 77/78] Create & Delete directory support #3253 1. Request builder for create & delete directories 2. Implementation of create & delete directories and Scala and Java API 3. Tests for create & delete directories 4. Documentation --- .../storage/impl/AzureStorageStream.scala | 24 ++++- .../azure/storage/javadsl/FileService.scala | 27 +++++ .../storage/requests/CreateDirectory.scala | 48 +++++++++ .../storage/requests/DeleteDirectory.scala | 48 +++++++++ .../azure/storage/scaladsl/FileService.scala | 24 +++++ .../test/java/docs/javadsl/StorageTest.java | 37 ++++++- .../scaladsl/AzureIntegrationTest.scala | 98 +++++++++++++++++-- .../scaladsl/StorageIntegrationSpec.scala | 25 +++-- .../scaladsl/StorageWireMockBase.scala | 21 ++++ .../scala/docs/scaladsl/StorageSpec.scala | 41 ++++++++ docs/src/main/paradox/azure-storage.md | 23 ++++- 11 files changed, 391 insertions(+), 25 deletions(-) create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/CreateDirectory.scala create mode 100644 azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteDirectory.scala diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index bace04b460..2488bf5bfe 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -29,8 +29,10 @@ import akka.stream.alpakka.azure.storage.impl.auth.Signer import akka.stream.alpakka.azure.storage.requests.{ ClearFileRange, CreateContainer, + CreateDirectory, CreateFile, DeleteContainer, + DeleteDirectory, GetProperties, RequestBuilder, UpdateFileRange @@ -140,6 +142,20 @@ object AzureStorageStream { objectPath = objectPath, requestBuilder = requestBuilder) + private[storage] def createDirectory(directoryPath: String, + requestBuilder: CreateDirectory): Source[Option[ObjectMetadata], NotUsed] = + handleRequest(successCode = Created, + storageType = FileType, + objectPath = directoryPath, + requestBuilder = requestBuilder) + + private[storage] def deleteDirectory(directoryPath: String, + requestBuilder: DeleteDirectory): Source[Option[ObjectMetadata], NotUsed] = + handleRequest(successCode = Accepted, + storageType = FileType, + objectPath = directoryPath, + requestBuilder = requestBuilder) + /** * Common function to handle all requests where we don't expect response body. * @@ -168,7 +184,13 @@ object AzureStorageStream { case HttpResponse(sc, h, entity, _) if sc == successCode => Source.future(entity.withoutSizeLimit().discardBytes().future().map(_ => Some(computeMetaData(h, entity)))) case HttpResponse(NotFound, _, entity, _) => - Source.future(entity.withoutSizeLimit().discardBytes().future().map(_ => None)(ExecutionContexts.parasitic)) + Source.future( + entity + .withoutSizeLimit() + .discardBytes() + .future() + .map(_ => None)(ExecutionContexts.parasitic) + ) case response: HttpResponse => Source.future(unmarshalError(response.status, response.entity)) } } diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala index f53b0aaca7..83a3eaeead 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/javadsl/FileService.scala @@ -12,7 +12,9 @@ import akka.http.scaladsl.model.HttpEntity import akka.stream.alpakka.azure.storage.impl.AzureStorageStream import akka.stream.alpakka.azure.storage.requests.{ ClearFileRange, + CreateDirectory, CreateFile, + DeleteDirectory, DeleteFile, GetFile, GetProperties, @@ -125,4 +127,29 @@ object FileService { .clearRange(objectPath, requestBuilder) .map(opt => Optional.ofNullable(opt.orNull)) .asJava + + /** + * Create directory. + * + * @param directoryPath path of the directory to be created, e.g., `myshare/myparentdirectorypath/mydirectory` + * @param requestBuilder builder to build createDirectory request + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def createDirectory(directoryPath: String, + requestBuilder: CreateDirectory): Source[Optional[ObjectMetadata], NotUsed] = + AzureStorageStream.createDirectory(directoryPath, requestBuilder).map(opt => Optional.ofNullable(opt.orNull)).asJava + + /** + * Delete directory. + * + * @param directoryPath path of the directory to be deleted, e.g., `myshare/myparentdirectorypath/mydirectory` + * @param requestBuilder builder to build deleteDirectory request + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def deleteDirectory(directoryPath: String, + requestBuilder: DeleteDirectory): Source[Optional[ObjectMetadata], NotUsed] = + AzureStorageStream.deleteDirectory(directoryPath, requestBuilder).map(opt => Optional.ofNullable(opt.orNull)).asJava + } diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/CreateDirectory.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/CreateDirectory.scala new file mode 100644 index 0000000000..78d5c2db72 --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/CreateDirectory.scala @@ -0,0 +1,48 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.scaladsl.model.{HttpHeader, HttpMethod, HttpMethods} +import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption +import akka.stream.alpakka.azure.storage.impl.StorageHeaders + +final class CreateDirectory(override val sse: Option[ServerSideEncryption] = None, + override val additionalHeaders: Seq[HttpHeader] = Seq.empty) + extends RequestBuilder { + + override protected val method: HttpMethod = HttpMethods.PUT + + override protected val queryParams: Map[String, String] = super.queryParams ++ Map("restype" -> "directory") + + override def withServerSideEncryption(sse: ServerSideEncryption): CreateDirectory = copy(sse = Option(sse)) + + override def addHeader(httpHeader: HttpHeader): CreateDirectory = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + + private def copy(sse: Option[ServerSideEncryption] = sse, additionalHeaders: Seq[HttpHeader] = additionalHeaders) = + new CreateDirectory(sse = sse, additionalHeaders = additionalHeaders) + + override protected def getHeaders: Seq[HttpHeader] = + StorageHeaders() + .witServerSideEncryption(sse) + .withAdditionalHeaders(additionalHeaders) + .headers +} + +object CreateDirectory { + + /* + * Scala API + */ + def apply(): CreateDirectory = new CreateDirectory() + + /* + * Java API + */ + def create(): CreateDirectory = CreateDirectory() +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteDirectory.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteDirectory.scala new file mode 100644 index 0000000000..b44efaff9f --- /dev/null +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/requests/DeleteDirectory.scala @@ -0,0 +1,48 @@ +/* + * Copyright (C) since 2016 Lightbend Inc. + */ + +package akka.stream.alpakka +package azure +package storage +package requests + +import akka.http.scaladsl.model.{HttpHeader, HttpMethod, HttpMethods} +import akka.stream.alpakka.azure.storage.headers.ServerSideEncryption +import akka.stream.alpakka.azure.storage.impl.StorageHeaders + +final class DeleteDirectory(override val sse: Option[ServerSideEncryption] = None, + override val additionalHeaders: Seq[HttpHeader] = Seq.empty) + extends RequestBuilder { + + override protected val method: HttpMethod = HttpMethods.DELETE + + override protected val queryParams: Map[String, String] = super.queryParams ++ Map("restype" -> "directory") + + override def withServerSideEncryption(sse: ServerSideEncryption): DeleteDirectory = copy(sse = Option(sse)) + + override def addHeader(httpHeader: HttpHeader): DeleteDirectory = + copy(additionalHeaders = additionalHeaders :+ httpHeader) + + private def copy(sse: Option[ServerSideEncryption] = sse, additionalHeaders: Seq[HttpHeader] = additionalHeaders) = + new DeleteDirectory(sse = sse, additionalHeaders = additionalHeaders) + + override protected def getHeaders: Seq[HttpHeader] = + StorageHeaders() + .witServerSideEncryption(sse) + .withAdditionalHeaders(additionalHeaders) + .headers +} + +object DeleteDirectory { + + /* + * Scala API + */ + def apply(): DeleteDirectory = new DeleteDirectory() + + /* + * Java API + */ + def create(): DeleteDirectory = DeleteDirectory() +} diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala index dbf099008f..2f61889745 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/scaladsl/FileService.scala @@ -12,7 +12,9 @@ import akka.http.scaladsl.model.HttpEntity import akka.stream.alpakka.azure.storage.impl.AzureStorageStream import akka.stream.alpakka.azure.storage.requests.{ ClearFileRange, + CreateDirectory, CreateFile, + DeleteDirectory, DeleteFile, GetFile, GetProperties, @@ -101,4 +103,26 @@ object FileService { */ def clearRange(objectPath: String, requestBuilder: ClearFileRange): Source[Option[ObjectMetadata], NotUsed] = AzureStorageStream.clearRange(objectPath, requestBuilder) + + /** + * Create directory. + * + * @param directoryPath path of the directory to be created, e.g., `myshare/myparentdirectorypath/mydirectory` + * @param requestBuilder builder to build createDirectory request + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def createDirectory(directoryPath: String, requestBuilder: CreateDirectory): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.createDirectory(directoryPath, requestBuilder) + + /** + * Delete directory. + * + * @param directoryPath path of the directory to be deleted, e.g., `myshare/myparentdirectorypath/mydirectory` + * @param requestBuilder builder to build deleteDirectory request + * @return A [[akka.stream.scaladsl.Source Source]] containing an [[scala.Option]] of + * [[akka.stream.alpakka.azure.storage.ObjectMetadata]], will be [[scala.None]] in case the object does not exist + */ + def deleteDirectory(directoryPath: String, requestBuilder: DeleteDirectory): Source[Option[ObjectMetadata], NotUsed] = + AzureStorageStream.deleteDirectory(directoryPath, requestBuilder) } diff --git a/azure-storage/src/test/java/docs/javadsl/StorageTest.java b/azure-storage/src/test/java/docs/javadsl/StorageTest.java index 29cd3bfcc9..68cdca48fb 100644 --- a/azure-storage/src/test/java/docs/javadsl/StorageTest.java +++ b/azure-storage/src/test/java/docs/javadsl/StorageTest.java @@ -13,8 +13,10 @@ import akka.stream.alpakka.azure.storage.javadsl.FileService; import akka.stream.alpakka.azure.storage.requests.ClearFileRange; import akka.stream.alpakka.azure.storage.requests.CreateContainer; +import akka.stream.alpakka.azure.storage.requests.CreateDirectory; import akka.stream.alpakka.azure.storage.requests.CreateFile; import akka.stream.alpakka.azure.storage.requests.DeleteContainer; +import akka.stream.alpakka.azure.storage.requests.DeleteDirectory; import akka.stream.alpakka.azure.storage.requests.DeleteFile; import akka.stream.alpakka.azure.storage.requests.GetBlob; import akka.stream.alpakka.azure.storage.requests.GetFile; @@ -65,7 +67,6 @@ public static void afterAll() { .thenRun(() -> TestKit.shutdownActorSystem(system)); } - @Test public void createContainer() throws Exception { mockCreateContainer(); @@ -99,7 +100,6 @@ public void deleteContainer() throws Exception { Assert.assertEquals(objectMetadata.getContentLength(), 0L); } - // TODO: There are couple of issues, firstly there are two `Content-Length` headers being added, one by `putBlob` // function and secondly by, most likely, by WireMock. Need to to figure out how to tell WireMock not to add `Content-Length` // header, secondly once that resolve then we get `akka.http.scaladsl.model.EntityStreamException`. @@ -224,6 +224,39 @@ public void deleteBlob() throws Exception { Assert.assertEquals(0L, objectMetadata.getContentLength()); } + @Test + public void createDirectory() throws Exception { + mockCreateDirectory(); + + //#create-directory + final Source, NotUsed> source = FileService.createDirectory(containerName(), CreateDirectory.create()); + + final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); + //#create-directory + + final var optionalObjectMetadata = optionalCompletionStage.toCompletableFuture().get(); + Assert.assertTrue(optionalObjectMetadata.isPresent()); + final var objectMetadata = optionalObjectMetadata.get(); + Assert.assertEquals(objectMetadata.getContentLength(), 0L); + Assert.assertEquals(objectMetadata.getETag().get(), ETagRawValue()); + } + + @Test + public void deleteDirectory() throws Exception { + mockDeleteDirectory(); + + //#delete-directory + final Source, NotUsed> source = FileService.deleteDirectory(containerName(), DeleteDirectory.create()); + + final CompletionStage> optionalCompletionStage = source.runWith(Sink.head(), system); + //#delete-directory + + final var optionalObjectMetadata = optionalCompletionStage.toCompletableFuture().get(); + Assert.assertTrue(optionalObjectMetadata.isPresent()); + final var objectMetadata = optionalObjectMetadata.get(); + Assert.assertEquals(objectMetadata.getContentLength(), 0L); + } + @Test public void createFile() throws Exception { mockCreateFile(); diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala index 08a26bfdd1..88b05bbf78 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/AzureIntegrationTest.scala @@ -12,7 +12,9 @@ import akka.http.scaladsl.model.ContentTypes import akka.http.scaladsl.model.headers.ByteRange import akka.stream.alpakka.azure.storage.requests.{ ClearFileRange, + CreateDirectory, CreateFile, + DeleteDirectory, DeleteFile, GetFile, GetProperties, @@ -29,9 +31,21 @@ class AzureIntegrationTest extends StorageIntegrationSpec { // Azurite doesn't support FileService yet so can't add these tests in the base class "FileService" should { + "create directory" in { + val maybeObjectMetadata = FileService + .createDirectory(directoryPath = defaultDirectoryPath, requestBuilder = CreateDirectory()) + .withAttributes(getDefaultAttributes) + .runWith(Sink.head) + .futureValue + + maybeObjectMetadata.isDefined shouldBe true + val objectMetadata = maybeObjectMetadata.get + objectMetadata.contentLength shouldBe 0 + } + "create file" in { val maybeObjectMetadata = FileService - .createFile(objectPath = objectPath, + .createFile(objectPath = fileObjectPath, requestBuilder = CreateFile(contentLength, ContentTypes.`text/plain(UTF-8)`)) .withAttributes(getDefaultAttributes) .runWith(Sink.head) @@ -45,7 +59,7 @@ class AzureIntegrationTest extends StorageIntegrationSpec { "put range" in { val maybeObjectMetadata = FileService .updateRange( - objectPath = objectPath, + objectPath = fileObjectPath, requestBuilder = UpdateFileRange(ByteRange(0, contentLength - 1), ContentTypes.`text/plain(UTF-8)`), payload = Source.single(ByteString(sampleText)) ) @@ -60,7 +74,7 @@ class AzureIntegrationTest extends StorageIntegrationSpec { "get file" in { val (maybeEventualObjectMetadata, eventualText) = FileService - .getFile(objectPath = objectPath, GetFile()) + .getFile(objectPath = fileObjectPath, GetFile()) .via(framing) .map(byteString => byteString.utf8String + System.lineSeparator()) .toMat(Sink.seq)(Keep.both) @@ -75,7 +89,7 @@ class AzureIntegrationTest extends StorageIntegrationSpec { "get file properties" in { val maybeObjectMetadata = FileService - .getProperties(objectPath, GetProperties()) + .getProperties(fileObjectPath, GetProperties()) .withAttributes(getDefaultAttributes) .runWith(Sink.head) .futureValue @@ -90,7 +104,7 @@ class AzureIntegrationTest extends StorageIntegrationSpec { val range = ByteRange(0, 8) val (maybeEventualObjectMetadata, eventualText) = FileService - .getFile(objectPath, GetFile().withRange(range)) + .getFile(fileObjectPath, GetFile().withRange(range)) .withAttributes(getDefaultAttributes) .via(framing) .map(_.utf8String) @@ -105,7 +119,7 @@ class AzureIntegrationTest extends StorageIntegrationSpec { "clear range" in { val range = ByteRange(16, 24) val maybeObjectMetadata = FileService - .clearRange(objectPath = objectPath, requestBuilder = ClearFileRange(range)) + .clearRange(objectPath = fileObjectPath, requestBuilder = ClearFileRange(range)) .withAttributes(getDefaultAttributes) .runWith(Sink.head) .futureValue @@ -116,10 +130,9 @@ class AzureIntegrationTest extends StorageIntegrationSpec { "delete file" in { val maybeObjectMetadata = FileService - .deleteFile(objectPath, DeleteFile()) + .deleteFile(fileObjectPath, DeleteFile()) .withAttributes(getDefaultAttributes) - .toMat(Sink.head)(Keep.right) - .run() + .runWith(Sink.head) .futureValue maybeObjectMetadata.get.contentLength shouldBe 0 @@ -128,7 +141,7 @@ class AzureIntegrationTest extends StorageIntegrationSpec { "get file after delete" in { val maybeObjectMetadata = FileService - .getProperties(objectPath, GetProperties()) + .getProperties(fileObjectPath, GetProperties()) .withAttributes(getDefaultAttributes) .toMat(Sink.head)(Keep.right) .run() @@ -136,5 +149,70 @@ class AzureIntegrationTest extends StorageIntegrationSpec { maybeObjectMetadata shouldBe empty } + + "test operations on multi level directory structure" in { + val directoryName = "sub-directory" + val directoryPath = s"$defaultDirectoryPath/$directoryName" + val filePath = s"$directoryPath/$fileName" + + // create directory + FileService + .createDirectory(directoryPath, CreateDirectory()) + .withAttributes(getDefaultAttributes) + .runWith(Sink.head) + .futureValue shouldBe defined + + // create file + FileService + .createFile(filePath, CreateFile(contentLength, ContentTypes.`text/plain(UTF-8)`)) + .withAttributes(getDefaultAttributes) + .runWith(Sink.head) + .futureValue shouldBe defined + + // update content + FileService + .updateRange( + objectPath = filePath, + requestBuilder = UpdateFileRange(ByteRange(0, contentLength - 1), ContentTypes.`text/plain(UTF-8)`), + payload = Source.single(ByteString(sampleText)) + ) + .withAttributes(getDefaultAttributes) + .runWith(Sink.head) + .futureValue shouldBe defined + + // get file properties + FileService + .getProperties(filePath, GetProperties()) + .withAttributes(getDefaultAttributes) + .runWith(Sink.head) + .futureValue + .map(_.contentLength) shouldBe Some(sampleText.length) + + // delete file + FileService + .deleteFile(filePath, DeleteFile()) + .withAttributes(getDefaultAttributes) + .runWith(Sink.head) + .futureValue shouldBe defined + + // delete directory + FileService + .deleteDirectory(directoryPath, DeleteDirectory()) + .withAttributes(getDefaultAttributes) + .runWith(Sink.head) + .futureValue shouldBe defined + } + + "delete directory" in { + val maybeObjectMetadata = FileService + .deleteDirectory(directoryPath = defaultDirectoryPath, requestBuilder = DeleteDirectory()) + .withAttributes(getDefaultAttributes) + .runWith(Sink.head) + .futureValue + + maybeObjectMetadata.isDefined shouldBe true + val objectMetadata = maybeObjectMetadata.get + objectMetadata.contentLength shouldBe 0 + } } } diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala index b8e2243334..597819402d 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageIntegrationSpec.scala @@ -7,7 +7,7 @@ package azure package storage package scaladsl -import akka.{Done, NotUsed} +import akka.NotUsed import akka.actor.ActorSystem import akka.http.scaladsl.Http import akka.http.scaladsl.model.ContentTypes @@ -33,7 +33,7 @@ import org.scalatest.wordspec.AnyWordSpecLike import java.security.MessageDigest import java.util.Base64 import scala.concurrent.duration._ -import scala.concurrent.{ExecutionContext, Future} +import scala.concurrent.ExecutionContext trait StorageIntegrationSpec extends AnyWordSpecLike @@ -44,10 +44,15 @@ trait StorageIntegrationSpec with LogCapturing { protected val defaultContainerName = "test-container" - protected val fileName = "sample-blob.txt" + // share was created manually + protected val defaultShareName = "test-share" + protected val defaultDirectoryName = "test-directory" + protected val defaultDirectoryPath = s"$defaultShareName/$defaultDirectoryName" + protected val fileName = "sample-file.txt" protected val sampleText: String = "The quick brown fox jumps over the lazy dog." + System.lineSeparator() protected val contentLength: Long = sampleText.length.toLong - protected val objectPath = s"$defaultContainerName/$fileName" + protected val blobObjectPath = s"$defaultContainerName/$fileName" + protected val fileObjectPath = s"$defaultDirectoryPath/$fileName" protected val framing: Flow[ByteString, ByteString, NotUsed] = Framing.delimiter(ByteString(System.lineSeparator()), 256, allowTruncation = true) @@ -80,7 +85,7 @@ trait StorageIntegrationSpec val maybeObjectMetadata = BlobService .putBlockBlob( - objectPath = objectPath, + objectPath = blobObjectPath, requestBuilder = PutBlockBlob(contentLength, ContentTypes.`text/plain(UTF-8)`), payload = Source.single(ByteString(sampleText)) ) @@ -96,7 +101,7 @@ trait StorageIntegrationSpec "get blob" in { val (maybeEventualObjectMetadata, eventualText) = BlobService - .getBlob(objectPath, GetBlob()) + .getBlob(blobObjectPath, GetBlob()) .withAttributes(getDefaultAttributes) .via(framing) .map(byteString => byteString.utf8String + System.lineSeparator()) @@ -112,7 +117,7 @@ trait StorageIntegrationSpec "get blob properties" in { val maybeObjectMetadata = BlobService - .getProperties(objectPath, GetProperties()) + .getProperties(blobObjectPath, GetProperties()) .withAttributes(getDefaultAttributes) .runWith(Sink.head) .futureValue @@ -127,7 +132,7 @@ trait StorageIntegrationSpec val range = ByteRange.Slice(0, 8) val (maybeEventualObjectMetadata, eventualText) = BlobService - .getBlob(objectPath, GetBlob().withRange(range)) + .getBlob(blobObjectPath, GetBlob().withRange(range)) .withAttributes(getDefaultAttributes) .via(framing) .map(_.utf8String) @@ -142,7 +147,7 @@ trait StorageIntegrationSpec "delete blob" in { val maybeObjectMetadata = BlobService - .deleteBlob(objectPath, DeleteBlob()) + .deleteBlob(blobObjectPath, DeleteBlob()) .withAttributes(getDefaultAttributes) .toMat(Sink.head)(Keep.right) .run() @@ -154,7 +159,7 @@ trait StorageIntegrationSpec "get blob after delete" in { val maybeObjectMetadata = BlobService - .getProperties(objectPath, GetProperties()) + .getProperties(blobObjectPath, GetProperties()) .withAttributes(getDefaultAttributes) .toMat(Sink.head)(Keep.right) .run() diff --git a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala index 7959d8ddbc..3da8616235 100644 --- a/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala +++ b/azure-storage/src/test/scala/akka/stream/alpakka/azure/storage/scaladsl/StorageWireMockBase.scala @@ -72,6 +72,27 @@ abstract class StorageWireMockBase(_system: ActorSystem, val _wireMockServer: Wi ) ) + protected def mockCreateDirectory(): StubMapping = + mock.register( + put(urlEqualTo(s"/$AccountName/$containerName?restype=directory")) + .willReturn( + aResponse() + .withStatus(201) + .withHeader(ETag.name, ETagValue) + .withHeader(`Content-Length`.name, "0") + ) + ) + + protected def mockDeleteDirectory(): StubMapping = + mock.register( + delete(urlEqualTo(s"/$AccountName/$containerName?restype=directory")) + .willReturn( + aResponse() + .withStatus(202) + .withHeader(`Content-Length`.name, "0") + ) + ) + protected def mockPutBlockBlob(): StubMapping = mock.register( put(urlEqualTo(s"/$AccountName/$containerName/$blobName")) diff --git a/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala b/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala index a3ac4eb5cc..93a0a792c8 100644 --- a/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala +++ b/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala @@ -306,6 +306,27 @@ class StorageSpec "AzureStorage File connector" should { + "create directory" in { + mockCreateDirectory() + + //#create-directory + import akka.stream.alpakka.azure.storage.scaladsl.FileService + import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.CreateDirectory + + val source: Source[Option[ObjectMetadata], NotUsed] = + FileService.createDirectory(directoryPath = containerName, requestBuilder = CreateDirectory()) + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) + //#create-directory + + val maybeMetadata = eventualMaybeMetadata.futureValue + maybeMetadata shouldBe defined + val metadata = maybeMetadata.get + metadata.eTag shouldBe Some(ETagRawValue) + metadata.contentLength shouldBe 0L + } + "create file" in { mockCreateFile() @@ -439,5 +460,25 @@ class StorageSpec metadata.eTag shouldBe Some(ETagRawValue) metadata.contentLength shouldBe 0L } + + "delete directory" in { + mockDeleteDirectory() + + //#delete-directory + import akka.stream.alpakka.azure.storage.scaladsl.FileService + import akka.stream.alpakka.azure.storage.ObjectMetadata + import akka.stream.alpakka.azure.storage.requests.DeleteDirectory + + val source: Source[Option[ObjectMetadata], NotUsed] = + FileService.deleteDirectory(directoryPath = containerName, requestBuilder = DeleteDirectory()) + + val eventualMaybeMetadata: Future[Option[ObjectMetadata]] = source.runWith(Sink.head) + //#delete-directory + + val maybeMetadata = eventualMaybeMetadata.futureValue + maybeMetadata shouldBe defined + val metadata = maybeMetadata.get + metadata.contentLength shouldBe 0L + } } } diff --git a/docs/src/main/paradox/azure-storage.md b/docs/src/main/paradox/azure-storage.md index 365d631dd2..edb81cfbcf 100644 --- a/docs/src/main/paradox/azure-storage.md +++ b/docs/src/main/paradox/azure-storage.md @@ -125,7 +125,7 @@ Scala Java : @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #create-container } -### Create Container +### Delete Container The [`Delete Container`](https://learn.microsoft.com/en-us/rest/api/storageservices/delete-container) operation creates existing container under the specified account. @@ -135,7 +135,6 @@ Scala Java : @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #delete-container } - ### Put Block Blob The [`Put Block Blob`](https://learn.microsoft.com/en-us/rest/api/storageservices/put-blob) operation creates a new block or updates the content of an existing block blob. @@ -214,6 +213,26 @@ Scala Java : @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #clear-range } +### Create Directory + +The [`Create Directory`](https://learn.microsoft.com/en-us/rest/api/storageservices/create-directory) operation creates a new container under the specified account. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #create-directory } + +Java +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #create-directory } + +### Delete Directory + +The [`Delete Directory`](https://learn.microsoft.com/en-us/rest/api/storageservices/delete-directory) operation creates existing container under the specified account. + +Scala +: @@snip [snip](/azure-storage/src/test/scala/docs/scaladsl/StorageSpec.scala) { #delete-directory } + +Java +: @@snip [snip](/azure-storage/src/test/java/docs/javadsl/StorageTest.java) { #delete-directory } + ### Get File The [`Get File`](https://learn.microsoft.com/en-us/rest/api/storageservices/get-file) operation reads or downloads a file from the system, including its metadata and properties. From 43f347fb7e5e099003c4c31a937a2103cb26cb17 Mon Sep 17 00:00:00 2001 From: sfali Date: Mon, 30 Sep 2024 12:41:18 -0400 Subject: [PATCH 78/78] fix import #3253 --- .../alpakka/azure/storage/impl/AzureStorageStream.scala | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala index 2488bf5bfe..4e2dab42c2 100644 --- a/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala +++ b/azure-storage/src/main/scala/akka/stream/alpakka/azure/storage/impl/AzureStorageStream.scala @@ -9,7 +9,6 @@ package impl import akka.NotUsed import akka.actor.ActorSystem -import akka.dispatch.ExecutionContexts import akka.http.scaladsl.Http import akka.http.scaladsl.model.StatusCodes.{Accepted, Created, NotFound, OK} import akka.http.scaladsl.model.headers.{`Content-Length`, `Content-Type`} @@ -41,7 +40,7 @@ import akka.stream.scaladsl.{Flow, RetryFlow, Source} import akka.util.ByteString import java.time.Clock -import scala.concurrent.{Future, Promise} +import scala.concurrent.{ExecutionContext, Future, Promise} import scala.util.{Failure, Success, Try} object AzureStorageStream { @@ -70,7 +69,7 @@ object AzureStorageStream { } .mapMaterializedValue(_ => objectMetadataMat.future) } - .mapMaterializedValue(_.flatMap(identity)(ExecutionContexts.parasitic)) + .mapMaterializedValue(_.flatMap(identity)(ExecutionContext.parasitic)) private[storage] def getBlobProperties(objectPath: String, requestBuilder: GetProperties): Source[Option[ObjectMetadata], NotUsed] = @@ -189,7 +188,7 @@ object AzureStorageStream { .withoutSizeLimit() .discardBytes() .future() - .map(_ => None)(ExecutionContexts.parasitic) + .map(_ => None)(ExecutionContext.parasitic) ) case response: HttpResponse => Source.future(unmarshalError(response.status, response.entity)) }