Merge pull request #279 from mziccard/fix-unset-metadata
Provide a way to unset blob metadata
aozarov committed Oct 23, 2015
2 parents eef07a7 + 682848a commit 33e932e
Showing 4 changed files with 144 additions and 45 deletions.
File 1 of 4 (the Blob class):
@@ -190,6 +190,16 @@ public Blob reload(BlobSourceOption... options) {
* made on the metadata generation of the current blob. If you want to update the information only
* if the current blob metadata are at their latest version, use the {@code metagenerationMatch}
* option: {@code blob.update(newInfo, BlobTargetOption.metagenerationMatch())}.
* <p>
* The blob's original metadata are merged with the metadata in the provided {@code blobInfo}. To
* replace the metadata entirely, first unset them by updating with the provided
* {@code blobInfo}'s metadata set to {@code null}, then update again with the new metadata.
* </p>
* <p>
* Example of replacing a blob's metadata:
* <pre> {@code blob.update(blob.info().toBuilder().metadata(null).build());}
* {@code blob.update(blob.info().toBuilder().metadata(newMetadata).build());}
* </pre>
*
* @param blobInfo new blob's information. Bucket and blob names must match the current ones
* @param options update options
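To make the merge, replace, and unset behaviors concrete, here is a minimal sketch (not part of this commit) that assumes an existing Blob named blob whose current metadata is {"k1": "a"} and uses only the update and info methods shown above:

// Assumption: "blob" is an existing Blob whose current metadata is {"k1": "a"}.
Map<String, String> newMetadata = new HashMap<>();
newMetadata.put("k2", "b");

// Merge (the default): the updated blob ends up with both "k1" and "k2".
blob.update(blob.info().toBuilder().metadata(newMetadata).build());

// Replace: unset the existing metadata first, then set the new map; only "k2" remains.
blob.update(blob.info().toBuilder().metadata(null).build());
blob.update(blob.info().toBuilder().metadata(newMetadata).build());

// Unset a single key: map it to null; "k1" is removed and any other keys are left untouched.
Map<String, String> removeK1 = new HashMap<>();
removeK1.put("k1", null);
blob.update(blob.info().toBuilder().metadata(removeK1).build());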
@@ -306,8 +316,7 @@ public Storage storage() {
}

/**
* Gets the requested blobs. If {@code infos.length == 0} an empty list is returned. If
* {@code infos.length > 1} a batch request is used to fetch blobs.
* Gets the requested blobs. A batch request is used to fetch blobs.
*
* @param storage the storage service used to issue the request
* @param blobs the blobs to get
@@ -331,8 +340,12 @@ public Blob apply(BlobInfo f) {
}

/**
* Updates the requested blobs. If {@code infos.length == 0} an empty list is returned. If
* {@code infos.length > 1} a batch request is used to update blobs.
* Updates the requested blobs. A batch request is used to update the blobs. Each blob's original
* metadata are merged with the metadata in the corresponding {@code BlobInfo} object. To replace
* the metadata entirely, first unset them by setting the provided {@code BlobInfo} object's
* metadata to {@code null}. See
* {@link #update(com.google.gcloud.storage.BlobInfo,
* com.google.gcloud.storage.Storage.BlobTargetOption...)} for a code example.
*
* @param storage the storage service used to issue the request
* @param infos the blobs to update
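As a hedged sketch of the batch variant (assuming info1 and info2 are the BlobInfo objects of two existing blobs, and that this static helper returns the updated blobs as a list):

List<Blob> updated = Blob.update(storage,
    info1.toBuilder().contentType("text/plain").build(),  // info1's metadata is carried over and merged, so it is unchanged
    info2.toBuilder().metadata(null).build());             // unsets all of info2's metadata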
@@ -356,8 +369,7 @@ public Blob apply(BlobInfo f) {
}

/**
* Deletes the requested blobs. If {@code infos.length == 0} an empty list is returned. If
* {@code infos.length > 1} a batch request is used to delete blobs.
* Deletes the requested blobs. A batch request is used to delete blobs.
*
* @param storage the storage service used to issue the request
* @param blobs the blobs to delete
File 2 of 4 (the BlobInfo class):
@@ -27,14 +27,19 @@
import com.google.common.base.Function;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

import java.io.Serializable;
import java.math.BigInteger;
import java.util.AbstractMap;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

/**
* Google Storage object metadata.
@@ -81,6 +86,17 @@ public StorageObject apply(BlobInfo blobInfo) {
private final String contentLanguage;
private final Integer componentCount;

/**
* This class is meant for internal use only. Users are discouraged from relying on it directly.
*/
public static final class ImmutableEmptyMap<K, V> extends AbstractMap<K, V> {

@Override
public Set<Entry<K, V>> entrySet() {
return ImmutableSet.of();
}
}
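// A hedged note on why this class exists: Data.nullOf(Class) must instantiate the class it is
// given in order to build its "null marker" instance, so it needs a concrete map type with a
// public no-arg constructor; abstract types such as ImmutableMap cannot serve. Roughly:
//
//   Map<String, String> unset = Data.nullOf(ImmutableEmptyMap.class);
//   Data.isNull(unset);  // true: recognized as an explicit null
//   unset.isEmpty();     // true: still behaves as an empty, read-only map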

public static final class Builder {

private BlobId blobId;
@@ -99,7 +115,7 @@ public static final class Builder {
private String md5;
private String crc32c;
private String mediaLink;
private ImmutableMap<String, String> metadata;
private Map<String, String> metadata;
private Long generation;
private Long metageneration;
private Long deleteTime;
@@ -188,7 +204,8 @@ Builder mediaLink(String mediaLink) {
}

public Builder metadata(Map<String, String> metadata) {
this.metadata = metadata != null ? ImmutableMap.copyOf(metadata) : null;
this.metadata = metadata != null
    ? new HashMap<>(metadata) : Data.<Map>nullOf(ImmutableEmptyMap.class);
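// The null marker records an explicit request to clear all metadata, whereas a field that was
// never set stays plain null and means "leave the metadata unchanged".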
return this;
}

@@ -315,7 +332,7 @@ public String mediaLink() {
}

public Map<String, String> metadata() {
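// Both the never-set case and the explicit null marker are surfaced to callers as a plain null.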
return metadata;
return metadata == null || Data.isNull(metadata)
    ? null : Collections.unmodifiableMap(metadata);
}

public Long generation() {
@@ -402,14 +419,21 @@ public ObjectAccessControl apply(Acl acl) {
if (owner != null) {
storageObject.setOwner(new Owner().setEntity(owner.toPb()));
}
Map<String, String> pbMetadata = metadata;
if (metadata != null && !Data.isNull(metadata)) {
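// Null values are swapped for the String null marker so they serialize as explicit JSON nulls,
// which the service interprets as a request to remove those keys.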
pbMetadata = Maps.newHashMapWithExpectedSize(metadata.size());
for (String key : metadata.keySet()) {
pbMetadata.put(key, firstNonNull(metadata.get(key), Data.<String>nullOf(String.class)));
}
}
storageObject.setMetadata(pbMetadata);
storageObject.setCacheControl(cacheControl);
storageObject.setContentEncoding(contentEncoding);
storageObject.setCrc32c(crc32c);
storageObject.setContentType(contentType);
storageObject.setGeneration(generation);
storageObject.setMd5Hash(md5);
storageObject.setMediaLink(mediaLink);
storageObject.setMetadata(metadata);
storageObject.setMetageneration(metageneration);
storageObject.setContentDisposition(contentDisposition);
storageObject.setComponentCount(componentCount);
File 3 of 4 (the Storage interface):
@@ -685,15 +685,29 @@ public static Builder builder() {
BucketInfo update(BucketInfo bucketInfo, BucketTargetOption... options);

/**
* Update blob information.
* Update blob information. The blob's original metadata are merged with the metadata in the
* provided {@code blobInfo}. To replace the metadata entirely, first unset them by updating with
* the provided {@code blobInfo}'s metadata set to {@code null}, then update again with the new
* metadata.
* <p>
* Example of replacing a blob's metadata:
* <pre> {@code service.update(BlobInfo.builder("bucket", "name").metadata(null).build());}
* {@code service.update(BlobInfo.builder("bucket", "name").metadata(newMetadata).build());}
* </pre>
*
* @return the updated blob
* @throws StorageException upon failure
*/
BlobInfo update(BlobInfo blobInfo, BlobTargetOption... options);

/**
* Update blob information.
* Update blob information. The blob's original metadata are merged with the metadata in the
* provided {@code blobInfo}. To replace the metadata entirely, first unset them by updating with
* the provided {@code blobInfo}'s metadata set to {@code null}, then update again with the new
* metadata.
* <p>
* Example of replacing a blob's metadata:
* <pre> {@code service.update(BlobInfo.builder("bucket", "name").metadata(null).build());}
* {@code service.update(BlobInfo.builder("bucket", "name").metadata(newMetadata).build());}
* </pre>
*
* @return the updated blob
* @throws StorageException upon failure
@@ -828,7 +842,11 @@ public static Builder builder() {
List<BlobInfo> get(BlobId... blobIds);

/**
* Updates the requested blobs. A batch request is used to perform this call.
* Updates the requested blobs. A batch request is used to perform this call. Each blob's original
* metadata are merged with the metadata in the corresponding {@code BlobInfo} object. To replace
* the metadata entirely, first unset them by setting the provided {@code BlobInfo} object's
* metadata to {@code null}. See
* {@link #update(com.google.gcloud.storage.BlobInfo)} for a code example.
*
* @param blobInfos blobs to update
* @return an immutable list of {@code BlobInfo} objects. If a blob does not exist or access to it
File 4 of 4 (the storage integration tests):
@@ -25,6 +25,7 @@
import static org.junit.Assert.fail;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.gcloud.RestorableState;
import com.google.gcloud.storage.testing.RemoteGcsHelper;

@@ -36,8 +37,10 @@
import java.net.URLConnection;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@@ -95,8 +98,7 @@ public void testCreateBlob() {
BlobInfo blob = BlobInfo.builder(bucket, blobName).build();
BlobInfo remoteBlob = storage.create(blob, BLOB_BYTE_CONTENT);
assertNotNull(remoteBlob);
assertEquals(blob.bucket(), remoteBlob.bucket());
assertEquals(blob.name(), remoteBlob.name());
assertEquals(blob.blobId(), remoteBlob.blobId());
byte[] readBytes = storage.readAllBytes(bucket, blobName);
assertArrayEquals(BLOB_BYTE_CONTENT, readBytes);
assertTrue(storage.delete(bucket, blobName));
@@ -108,8 +110,7 @@ public void testCreateEmptyBlob() {
BlobInfo blob = BlobInfo.builder(bucket, blobName).build();
BlobInfo remoteBlob = storage.create(blob);
assertNotNull(remoteBlob);
assertEquals(blob.bucket(), remoteBlob.bucket());
assertEquals(blob.name(), remoteBlob.name());
assertEquals(blob.blobId(), remoteBlob.blobId());
byte[] readBytes = storage.readAllBytes(bucket, blobName);
assertArrayEquals(new byte[0], readBytes);
assertTrue(storage.delete(bucket, blobName));
@@ -122,8 +123,7 @@ public void testCreateBlobStream() throws UnsupportedEncodingException {
ByteArrayInputStream stream = new ByteArrayInputStream(BLOB_STRING_CONTENT.getBytes(UTF_8));
BlobInfo remoteBlob = storage.create(blob, stream);
assertNotNull(remoteBlob);
assertEquals(blob.bucket(), remoteBlob.bucket());
assertEquals(blob.name(), remoteBlob.name());
assertEquals(blob.blobId(), remoteBlob.blobId());
assertEquals(blob.contentType(), remoteBlob.contentType());
byte[] readBytes = storage.readAllBytes(bucket, blobName);
assertEquals(BLOB_STRING_CONTENT, new String(readBytes, UTF_8));
@@ -168,12 +168,68 @@ public void testUpdateBlob() {
assertNotNull(storage.create(blob));
BlobInfo updatedBlob = storage.update(blob.toBuilder().contentType(CONTENT_TYPE).build());
assertNotNull(updatedBlob);
assertEquals(blob.bucket(), updatedBlob.bucket());
assertEquals(blob.name(), updatedBlob.name());
assertEquals(blob.blobId(), updatedBlob.blobId());
assertEquals(CONTENT_TYPE, updatedBlob.contentType());
assertTrue(storage.delete(bucket, blobName));
}

@Test
public void testUpdateBlobReplaceMetadata() {
String blobName = "test-update-blob-replace-metadata";
ImmutableMap<String, String> metadata = ImmutableMap.of("k1", "a");
ImmutableMap<String, String> newMetadata = ImmutableMap.of("k2", "b");
BlobInfo blob = BlobInfo.builder(bucket, blobName)
.contentType(CONTENT_TYPE)
.metadata(metadata)
.build();
assertNotNull(storage.create(blob));
BlobInfo updatedBlob = storage.update(blob.toBuilder().metadata(null).build());
assertNotNull(updatedBlob);
assertNull(updatedBlob.metadata());
updatedBlob = storage.update(blob.toBuilder().metadata(newMetadata).build());
assertEquals(blob.blobId(), updatedBlob.blobId());
assertEquals(newMetadata, updatedBlob.metadata());
assertTrue(storage.delete(bucket, blobName));
}

@Test
public void testUpdateBlobMergeMetadata() {
String blobName = "test-update-blob-merge-metadata";
ImmutableMap<String, String> metadata = ImmutableMap.of("k1", "a");
ImmutableMap<String, String> newMetadata = ImmutableMap.of("k2", "b");
ImmutableMap<String, String> expectedMetadata = ImmutableMap.of("k1", "a", "k2", "b");
BlobInfo blob = BlobInfo.builder(bucket, blobName)
.contentType(CONTENT_TYPE)
.metadata(metadata)
.build();
assertNotNull(storage.create(blob));
BlobInfo updatedBlob = storage.update(blob.toBuilder().metadata(newMetadata).build());
assertNotNull(updatedBlob);
assertEquals(blob.blobId(), updatedBlob.blobId());
assertEquals(expectedMetadata, updatedBlob.metadata());
assertTrue(storage.delete(bucket, blobName));
}

@Test
public void testUpdateBlobUnsetMetadata() {
String blobName = "test-update-blob-unset-metadata";
ImmutableMap<String, String> metadata = ImmutableMap.of("k1", "a", "k2", "b");
Map<String, String> newMetadata = new HashMap<>();
newMetadata.put("k1", "a");
newMetadata.put("k2", null);
ImmutableMap<String, String> expectedMetadata = ImmutableMap.of("k1", "a");
BlobInfo blob = BlobInfo.builder(bucket, blobName)
.contentType(CONTENT_TYPE)
.metadata(metadata)
.build();
assertNotNull(storage.create(blob));
BlobInfo updatedBlob = storage.update(blob.toBuilder().metadata(newMetadata).build());
assertNotNull(updatedBlob);
assertEquals(blob.blobId(), updatedBlob.blobId());
assertEquals(expectedMetadata, updatedBlob.metadata());
assertTrue(storage.delete(bucket, blobName));
}

@Test
public void testUpdateBlobFail() {
String blobName = "test-update-blob-fail";
@@ -223,8 +279,7 @@ public void testComposeBlob() {
Storage.ComposeRequest.of(ImmutableList.of(sourceBlobName1, sourceBlobName2), targetBlob);
BlobInfo remoteBlob = storage.compose(req);
assertNotNull(remoteBlob);
assertEquals(bucket, remoteBlob.bucket());
assertEquals(targetBlobName, remoteBlob.name());
assertEquals(targetBlob.blobId(), remoteBlob.blobId());
byte[] readBytes = storage.readAllBytes(bucket, targetBlobName);
byte[] composedBytes = Arrays.copyOf(BLOB_BYTE_CONTENT, BLOB_BYTE_CONTENT.length * 2);
System.arraycopy(BLOB_BYTE_CONTENT, 0, composedBytes, BLOB_BYTE_CONTENT.length,
@@ -288,8 +343,7 @@ public void testCopyBlobUpdateMetadata() {
Storage.CopyRequest req = Storage.CopyRequest.of(bucket, sourceBlobName, targetBlob);
BlobInfo remoteBlob = storage.copy(req);
assertNotNull(remoteBlob);
assertEquals(bucket, remoteBlob.bucket());
assertEquals(targetBlobName, remoteBlob.name());
assertEquals(targetBlob.blobId(), remoteBlob.blobId());
assertEquals(CONTENT_TYPE, remoteBlob.contentType());
assertTrue(storage.delete(bucket, sourceBlobName));
assertTrue(storage.delete(bucket, targetBlobName));
@@ -337,10 +391,8 @@ public void testBatchRequest() {
assertEquals(0, updateResponse.gets().size());
BlobInfo remoteUpdatedBlob1 = updateResponse.updates().get(0).get();
BlobInfo remoteUpdatedBlob2 = updateResponse.updates().get(1).get();
assertEquals(bucket, remoteUpdatedBlob1.bucket());
assertEquals(bucket, remoteUpdatedBlob2.bucket());
assertEquals(updatedBlob1.name(), remoteUpdatedBlob1.name());
assertEquals(updatedBlob2.name(), remoteUpdatedBlob2.name());
assertEquals(sourceBlob1.blobId(), remoteUpdatedBlob1.blobId());
assertEquals(sourceBlob2.blobId(), remoteUpdatedBlob2.blobId());
assertEquals(updatedBlob1.contentType(), remoteUpdatedBlob1.contentType());
assertEquals(updatedBlob2.contentType(), remoteUpdatedBlob2.contentType());

@@ -515,8 +567,7 @@ public void testPostSignedUrl() throws IOException {
connection.connect();
BlobInfo remoteBlob = storage.get(bucket, blobName);
assertNotNull(remoteBlob);
assertEquals(bucket, remoteBlob.bucket());
assertEquals(blob.name(), remoteBlob.name());
assertEquals(blob.blobId(), remoteBlob.blobId());
assertTrue(storage.delete(bucket, blobName));
}

@@ -528,11 +579,9 @@ public void testGetBlobs() {
BlobInfo sourceBlob2 = BlobInfo.builder(bucket, sourceBlobName2).build();
assertNotNull(storage.create(sourceBlob1));
assertNotNull(storage.create(sourceBlob2));
List<BlobInfo> remoteInfos = storage.get(sourceBlob1.blobId(), sourceBlob2.blobId());
assertEquals(sourceBlob1.bucket(), remoteInfos.get(0).bucket());
assertEquals(sourceBlob1.name(), remoteInfos.get(0).name());
assertEquals(sourceBlob2.bucket(), remoteInfos.get(1).bucket());
assertEquals(sourceBlob2.name(), remoteInfos.get(1).name());
List<BlobInfo> remoteBlobs = storage.get(sourceBlob1.blobId(), sourceBlob2.blobId());
assertEquals(sourceBlob1.blobId(), remoteBlobs.get(0).blobId());
assertEquals(sourceBlob2.blobId(), remoteBlobs.get(1).blobId());
assertTrue(storage.delete(bucket, sourceBlobName1));
assertTrue(storage.delete(bucket, sourceBlobName2));
}
@@ -545,8 +594,7 @@ public void testGetBlobsFail() {
BlobInfo sourceBlob2 = BlobInfo.builder(bucket, sourceBlobName2).build();
assertNotNull(storage.create(sourceBlob1));
List<BlobInfo> remoteBlobs = storage.get(sourceBlob1.blobId(), sourceBlob2.blobId());
assertEquals(sourceBlob1.bucket(), remoteBlobs.get(0).bucket());
assertEquals(sourceBlob1.name(), remoteBlobs.get(0).name());
assertEquals(sourceBlob1.blobId(), remoteBlobs.get(0).blobId());
assertNull(remoteBlobs.get(1));
assertTrue(storage.delete(bucket, sourceBlobName1));
}
@@ -589,11 +637,9 @@ public void testUpdateBlobs() {
List<BlobInfo> updatedBlobs = storage.update(
remoteBlob1.toBuilder().contentType(CONTENT_TYPE).build(),
remoteBlob2.toBuilder().contentType(CONTENT_TYPE).build());
assertEquals(sourceBlob1.bucket(), updatedBlobs.get(0).bucket());
assertEquals(sourceBlob1.name(), updatedBlobs.get(0).name());
assertEquals(sourceBlob1.blobId(), updatedBlobs.get(0).blobId());
assertEquals(CONTENT_TYPE, updatedBlobs.get(0).contentType());
assertEquals(sourceBlob2.bucket(), updatedBlobs.get(1).bucket());
assertEquals(sourceBlob2.name(), updatedBlobs.get(1).name());
assertEquals(sourceBlob2.blobId(), updatedBlobs.get(1).blobId());
assertEquals(CONTENT_TYPE, updatedBlobs.get(1).contentType());
assertTrue(storage.delete(bucket, sourceBlobName1));
assertTrue(storage.delete(bucket, sourceBlobName2));
@@ -610,8 +656,7 @@ public void testUpdateBlobsFail() {
List<BlobInfo> updatedBlobs = storage.update(
remoteBlob1.toBuilder().contentType(CONTENT_TYPE).build(),
sourceBlob2.toBuilder().contentType(CONTENT_TYPE).build());
assertEquals(sourceBlob1.bucket(), updatedBlobs.get(0).bucket());
assertEquals(sourceBlob1.name(), updatedBlobs.get(0).name());
assertEquals(sourceBlob1.blobId(), updatedBlobs.get(0).blobId());
assertEquals(CONTENT_TYPE, updatedBlobs.get(0).contentType());
assertNull(updatedBlobs.get(1));
assertTrue(storage.delete(bucket, sourceBlobName1));
