Adjust encoding of Azure block IDs (#68980)
Today we represent block IDs sent to Azure using the URL-safe base-64
encoding. This makes sense: these IDs appear in URLs. It turns out,
however, that Azure rejects this encoding for block IDs and demands that
they be represented using the regular, URL-unsafe, base-64 encoding,
further wrapped in %-encoding to deal with the URL-unsafe characters that
inevitably result.
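
Concretely, the change amounts to decoding the ID with the URL-safe alphabet
and re-encoding it with the standard one; the %-encoding then happens when the
ID is placed in the request URL. A minimal sketch of that pipeline (the sample
ID is made up, and URLEncoder merely stands in for whatever escaping the Azure
SDK applies when it builds the URL):

    import java.net.URLEncoder;
    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public class BlockIdEncodingSketch {
        public static void main(String[] args) {
            // A URL-safe, unpadded base-64 block ID of the kind we used to send (made-up value).
            String urlSafeId = "ab-cd_ef12GH34ij56kl";

            // Re-encode the same bytes with the regular base-64 alphabet, as Azure demands.
            byte[] idBytes = Base64.getUrlDecoder().decode(urlSafeId);
            String regularId = Base64.getEncoder().withoutPadding().encodeToString(idBytes);

            // The regular alphabet can yield '+' and '/', which must be %-encoded in a URL.
            String idInUrl = URLEncoder.encode(regularId, StandardCharsets.UTF_8);

            System.out.println(regularId); // '+' and '/' wherever the original had '-' and '_'
            System.out.println(idInUrl);   // those characters now appear as %2B and %2F
        }
    }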

Relates #66489
Backport of #68957
DaveCTurner authored Feb 15, 2021
1 parent 7daea1c commit e7c0836
Showing 5 changed files with 43 additions and 7 deletions.
@@ -61,6 +61,7 @@
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Base64;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
@@ -448,11 +449,13 @@ private void executeMultipartUpload(String blobName, InputStream inputStream, lo
assert blobSize == (((nbParts - 1) * partSize) + lastPartSize) : "blobSize does not match multipart sizes";

final List<String> blockIds = new ArrayList<>(nbParts);
+final Base64.Encoder base64Encoder = Base64.getEncoder().withoutPadding();
+final Base64.Decoder base64UrlDecoder = Base64.getUrlDecoder();
for (int i = 0; i < nbParts; i++) {
final long length = i < nbParts - 1 ? partSize : lastPartSize;
Flux<ByteBuffer> byteBufferFlux = convertStreamToByteBuffer(inputStream, length, DEFAULT_UPLOAD_BUFFERS_SIZE);

-final String blockId = UUIDs.base64UUID();
+final String blockId = base64Encoder.encodeToString(base64UrlDecoder.decode(UUIDs.base64UUID()));
blockBlobAsyncClient.stageBlock(blockId, byteBufferFlux, length).block();
blockIds.add(blockId);
}
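
A side note on the new encoder and decoder above: the fixture added below
asserts that these block IDs decode to exactly 15 bytes, and 15 is a multiple
of 3, so the standard encoding is always exactly 20 characters and never needs
'=' padding. withoutPadding() therefore just keeps the re-encoded ID in the
same unpadded shape that UUIDs.base64UUID() produced. A quick check
(illustrative only):

    import java.util.Base64;

    public class BlockIdLengthCheck {
        public static void main(String[] args) {
            byte[] fifteenBytes = new byte[15]; // same length as the IDs being re-encoded

            String padded = Base64.getEncoder().encodeToString(fifteenBytes);
            String unpadded = Base64.getEncoder().withoutPadding().encodeToString(fifteenBytes);

            // 15 bytes -> 20 characters, with no '=' padding either way.
            System.out.println(padded.length());         // 20
            System.out.println(padded.equals(unpadded)); // true
        }
    }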
@@ -341,9 +341,11 @@ public void testWriteLargeBlob() throws Exception {

if ("PUT".equals(exchange.getRequestMethod())) {
final Map<String, String> params = new HashMap<>();
-RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params);
+RestUtils.decodeQueryString(exchange.getRequestURI().getRawQuery(), 0, params);

final String blockId = params.get("blockid");
+assert Strings.hasText(blockId) == false || AzureFixtureHelper.assertValidBlockId(blockId);

if (Strings.hasText(blockId) && (countDownUploads.decrementAndGet() % 2 == 0)) {
blocks.put(blockId, Streams.readFully(exchange.getRequestBody()));
exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1);
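
The switch from getQuery() to getRawQuery() matters because URI#getQuery()
returns the query string with %-escapes already decoded, while
RestUtils.decodeQueryString performs its own decoding; feeding it the
pre-decoded form would decode the block ID a second time, so a '+' produced by
the regular base-64 alphabet would typically come out as a space. A small
illustration (the account, container and block ID are made up):

    import java.net.URI;

    public class RawQueryDemo {
        public static void main(String[] args) throws Exception {
            // A block ID containing '+' and '/' arrives %-encoded as %2B and %2F.
            URI uri = new URI("https://account.blob.core.windows.net/container/blob"
                + "?comp=block&blockid=ab%2Bcd%2Fef12GH34ij56kl");

            System.out.println(uri.getQuery());    // comp=block&blockid=ab+cd/ef12GH34ij56kl (decoded once)
            System.out.println(uri.getRawQuery()); // comp=block&blockid=ab%2Bcd%2Fef12GH34ij56kl (still raw)
        }
    }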
1 change: 1 addition & 0 deletions test/fixtures/azure-fixture/build.gradle
@@ -13,6 +13,7 @@ tasks.named("test").configure { enabled = false }

dependencies {
api project(':server')
+api project(':test:framework')
}

tasks.named("preProcessFixture").configure {
@@ -22,22 +22,20 @@
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

+import static org.elasticsearch.repositories.azure.AzureFixtureHelper.assertValidBlockId;

/**
* Minimal HTTP handler that acts as an Azure compliant server
*/
@@ -64,9 +62,10 @@ public void handle(final HttpExchange exchange) throws IOException {
if (Regex.simpleMatch("PUT /" + account + "/" + container + "/*blockid=*", request)) {
// Put Block (https://docs.microsoft.com/en-us/rest/api/storageservices/put-block)
final Map<String, String> params = new HashMap<>();
-RestUtils.decodeQueryString(exchange.getRequestURI().getQuery(), 0, params);
+RestUtils.decodeQueryString(exchange.getRequestURI().getRawQuery(), 0, params);

final String blockId = params.get("blockid");
+assert assertValidBlockId(blockId);
blobs.put(blockId, Streams.readFully(exchange.getRequestBody()));
exchange.sendResponseHeaders(RestStatus.CREATED.getStatus(), -1);

@@ -0,0 +1,31 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/

package org.elasticsearch.repositories.azure;

import org.elasticsearch.common.Strings;

import java.util.Base64;

public class AzureFixtureHelper {
private AzureFixtureHelper() {
}

public static boolean assertValidBlockId(String blockId) {
assert Strings.hasText(blockId) : "blockId missing";
try {
final byte[] decode = Base64.getDecoder().decode(blockId);
// all block IDs for a blob must be the same length and <64 bytes prior to decoding.
// Elasticsearch generates them all to be 15 bytes long so we can just assert that:
assert decode.length == 15 : "blockid [" + blockId + "] decodes to [" + decode.length + "] bytes";
} catch (Exception e) {
assert false : new AssertionError("blockid [" + blockId + "] is not in base64", e);
}
return true;
}
}
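
The helper returns a boolean and reports problems via assert so that, as in
the handlers above, it can sit inside an assert statement and is skipped
entirely unless the JVM runs with assertions enabled. The call sites added in
this commit look like this:

    // In the azure-fixture handler, where the method is statically imported:
    assert assertValidBlockId(blockId);

    // In the repository test's handler, where the blockid parameter is only
    // validated when it is actually present on the PUT request:
    assert Strings.hasText(blockId) == false || AzureFixtureHelper.assertValidBlockId(blockId);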
