Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add custom metadata to snapshots #41281

Merged
merged 33 commits into from
Jun 5, 2019
Merged
Show file tree
Hide file tree
Changes from 32 commits
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
a3f3974
Add user-defined metadata to snapshots
gwbrown Apr 16, 2019
f7d9e10
Merge branch 'master' into snapshot-user-metadata
gwbrown Apr 17, 2019
26b1457
Check versions in stream methods
gwbrown Apr 17, 2019
62d2c03
Handle null metadata
gwbrown Apr 17, 2019
4d6ea78
Add metadata getter
gwbrown Apr 17, 2019
7ce8974
Add tests
gwbrown Apr 17, 2019
2337d06
No more NOCOMMIT! Now integrated into existing tests
gwbrown Apr 17, 2019
b727379
Line lengths, formatting, license headers
gwbrown Apr 17, 2019
84b39fd
Use 8.0 version for earliest version
gwbrown Apr 18, 2019
7ab2b70
Merge branch 'master' into snapshot-user-metadata
gwbrown Apr 18, 2019
30a44c4
Cleanup
gwbrown Apr 19, 2019
b098c7d
Docs
gwbrown Apr 19, 2019
4136cd2
Merge branch 'master' into snapshot-user-metadata
gwbrown Apr 19, 2019
e44ebf7
Add _meta to HLRC tests
gwbrown Apr 22, 2019
20f23ea
Add yaml test
gwbrown Apr 22, 2019
0793c32
Fix yaml test
gwbrown Apr 22, 2019
0abbf35
Merge branch 'master' into snapshot-user-metadata
gwbrown Apr 22, 2019
a265a2d
Merge branch 'master' into snapshot-user-metadata
gwbrown May 20, 2019
a6ab70c
Merge commit '78259f75de92e41623ec6dd02f9772b5340c86ae' into snapshot…
gwbrown May 21, 2019
337d163
Merge commit '148df31639a983058b758f5eef2c9df2f9346e94' into snapshot…
gwbrown May 22, 2019
726ef88
Add a limit on the size of the metadata field
gwbrown May 23, 2019
1638898
Use nested _meta + factor out metadata generation
gwbrown May 24, 2019
b717b69
Don't wrap metadata map per review
gwbrown May 24, 2019
4541985
Rename _meta to metadata
gwbrown May 24, 2019
31b48f7
Merge commit 'ffa5461b7f6bbc5d6587fd181591b710bd53a543' into snapshot…
gwbrown May 24, 2019
b8592ea
_meta -> metadata in yaml test
gwbrown May 26, 2019
eeadd65
Merge branch 'master' into snapshot-user-metadata
gwbrown May 28, 2019
348d6bd
Uncomment lines accidentally left commented
gwbrown May 28, 2019
0f4f9fd
Fix & comment getRandomFieldsExcludeFilter
gwbrown May 28, 2019
61588bb
Fix assertion in client test case
gwbrown May 28, 2019
dc8f0b2
Merge branch 'master' into snapshot-user-metadata
gwbrown May 28, 2019
fe70c1e
Merge branch 'master' into snapshot-user-metadata
elasticmachine Jun 3, 2019
d9d4388
Move metadata size limit check upstream per review
gwbrown Jun 4, 2019
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -41,9 +41,12 @@
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.snapshots.RestoreInfo;
import org.elasticsearch.snapshots.SnapshotInfo;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;

import static org.hamcrest.Matchers.contains;
Expand Down Expand Up @@ -139,6 +142,9 @@ public void testCreateSnapshot() throws IOException {
CreateSnapshotRequest request = new CreateSnapshotRequest(repository, snapshot);
boolean waitForCompletion = randomBoolean();
request.waitForCompletion(waitForCompletion);
if (randomBoolean()) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It would be nice to avoid copy-pasting metadata creation logic, I'd instead refactor out createSnapshotRequest method that accepts a boolean whether to include metadata to it.

request.userMetadata(randomUserMetadata());
}
request.partial(randomBoolean());
request.includeGlobalState(randomBoolean());

Expand Down Expand Up @@ -167,6 +173,8 @@ public void testGetSnapshots() throws IOException {
CreateSnapshotResponse putSnapshotResponse1 = createTestSnapshot(createSnapshotRequest1);
CreateSnapshotRequest createSnapshotRequest2 = new CreateSnapshotRequest(repository, snapshot2);
createSnapshotRequest2.waitForCompletion(true);
Map<String, Object> originalMetadata = randomUserMetadata();
createSnapshotRequest2.userMetadata(originalMetadata);
CreateSnapshotResponse putSnapshotResponse2 = createTestSnapshot(createSnapshotRequest2);
// check that the request went ok without parsing JSON here. When using the high level client, check acknowledgement instead.
assertEquals(RestStatus.OK, putSnapshotResponse1.status());
Expand All @@ -186,6 +194,12 @@ public void testGetSnapshots() throws IOException {
assertEquals(2, response.getSnapshots().size());
assertThat(response.getSnapshots().stream().map((s) -> s.snapshotId().getName()).collect(Collectors.toList()),
contains("test_snapshot1", "test_snapshot2"));
response.getSnapshots().stream()
.filter(s -> s.snapshotId().getName().equals("test_snapshot2"))
.findFirst()
.map(SnapshotInfo::userMetadata)
.ifPresentOrElse(metadata -> assertEquals(originalMetadata, metadata),
() -> assertNull("retrieved metadata is null, expected non-null metadata", originalMetadata));
}

public void testSnapshotsStatus() throws IOException {
Expand Down Expand Up @@ -231,6 +245,9 @@ public void testRestoreSnapshot() throws IOException {
CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(testRepository, testSnapshot);
createSnapshotRequest.indices(testIndex);
createSnapshotRequest.waitForCompletion(true);
if (randomBoolean()) {
createSnapshotRequest.userMetadata(randomUserMetadata());
}
CreateSnapshotResponse createSnapshotResponse = createTestSnapshot(createSnapshotRequest);
assertEquals(RestStatus.OK, createSnapshotResponse.status());

Expand Down Expand Up @@ -261,6 +278,9 @@ public void testDeleteSnapshot() throws IOException {

CreateSnapshotRequest createSnapshotRequest = new CreateSnapshotRequest(repository, snapshot);
createSnapshotRequest.waitForCompletion(true);
if (randomBoolean()) {
createSnapshotRequest.userMetadata(randomUserMetadata());
}
CreateSnapshotResponse createSnapshotResponse = createTestSnapshot(createSnapshotRequest);
// check that the request went ok without parsing JSON here. When using the high level client, check acknowledgement instead.
assertEquals(RestStatus.OK, createSnapshotResponse.status());
Expand All @@ -270,4 +290,28 @@ public void testDeleteSnapshot() throws IOException {

assertTrue(response.isAcknowledged());
}

/**
 * Builds a random user-metadata map for snapshot requests. Returns {@code null} about half
 * the time (metadata is optional on requests, so callers must handle its absence); otherwise
 * returns a map of up to 4 entries whose values are either short strings or, half the time,
 * a nested single-level map of up to 4 string entries.
 */
private static Map<String, Object> randomUserMetadata() {
    if (randomBoolean()) {
        return null;
    }

    Map<String, Object> metadata = new HashMap<>();
    int fields = randomIntBetween(0, 4);
    for (int i = 0; i < fields; i++) {
        if (randomBoolean()) {
            metadata.put(randomMetadataKey(metadata), randomAlphaOfLength(5));
        } else {
            metadata.put(randomMetadataKey(metadata), randomNestedMetadata());
        }
    }
    return metadata;
}

/** Generates a single-level map of 0-4 random string entries, used as a nested metadata value. */
private static Map<String, Object> randomNestedMetadata() {
    Map<String, Object> nested = new HashMap<>();
    int nestedFields = randomIntBetween(0, 4);
    for (int i = 0; i < nestedFields; i++) {
        nested.put(randomMetadataKey(nested), randomAlphaOfLength(5));
    }
    return nested;
}

/** Returns a random key (2-10 alpha chars) that is not already present in {@code existing}. */
private static String randomMetadataKey(Map<String, Object> existing) {
    return randomValueOtherThanMany(existing::containsKey, () -> randomAlphaOfLengthBetween(2, 10));
}
}
9 changes: 8 additions & 1 deletion docs/reference/modules/snapshots.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -349,7 +349,11 @@ PUT /_snapshot/my_backup/snapshot_2?wait_for_completion=true
{
"indices": "index_1,index_2",
"ignore_unavailable": true,
"include_global_state": false
"include_global_state": false,
"metadata": {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wonder if we should somehow limit the size of this? :) It seems to me there's quite a bit of potential for misusing this. Not sure what others think, maybe @ywelsch has an opinion?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I agree that the size of meta should be limited

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That does seem like a good idea - I don't see limiting the number of fields being terribly useful, perhaps a limit on the number of bytes in serialized-to-JSON form?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@gibrown yea limiting the size in bytes makes sense imo.

"taken_by": "kimchy",
"taken_because": "backup before upgrading"
}
}
-----------------------------------
// CONSOLE
Expand All @@ -363,6 +367,9 @@ By setting `include_global_state` to false it's possible to prevent the cluster
the snapshot. By default, the entire snapshot will fail if one or more indices participating in the snapshot don't have
all primary shards available. This behaviour can be changed by setting `partial` to `true`.

The `metadata` field can be used to attach arbitrary metadata to the snapshot. This may be a record of who took the snapshot,
why it was taken, or any other data that might be useful.

Snapshot names can be automatically derived using <<date-math-index-names,date math expressions>>, similarly as when creating
new indices. Note that special characters need to be URI encoded.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,7 @@ setup:
- is_false: snapshots.0.failures
- is_false: snapshots.0.shards
- is_false: snapshots.0.version
- is_false: snapshots.0._meta

- do:
snapshot.delete:
Expand Down Expand Up @@ -149,3 +150,41 @@ setup:
snapshot.delete:
repository: test_repo_get_1
snapshot: test_snapshot_without_include_global_state

---
"Get snapshot info with metadata":
- skip:
version: " - 7.9.99"
reason: "https://github.com/elastic/elasticsearch/pull/41281 not yet backported to 7.x"

- do:
indices.create:
index: test_index
body:
settings:
number_of_shards: 1
number_of_replicas: 0

- do:
snapshot.create:
repository: test_repo_get_1
snapshot: test_snapshot_with_metadata
wait_for_completion: true
body: |
{ "metadata": {"taken_by": "test", "foo": {"bar": "baz"}} }
- do:
snapshot.get:
repository: test_repo_get_1
snapshot: test_snapshot_with_metadata

- is_true: snapshots
- match: { snapshots.0.snapshot: test_snapshot_with_metadata }
- match: { snapshots.0.state: SUCCESS }
- match: { snapshots.0.metadata.taken_by: test }
- match: { snapshots.0.metadata.foo.bar: baz }

- do:
snapshot.delete:
repository: test_repo_get_1
snapshot: test_snapshot_with_metadata
Original file line number Diff line number Diff line change
Expand Up @@ -19,12 +19,14 @@

package org.elasticsearch.action.admin.cluster.snapshots.create;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
Expand All @@ -46,6 +48,7 @@
import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
import static org.elasticsearch.snapshots.SnapshotInfo.METADATA_FIELD_INTRODUCED;

/**
* Create snapshot request
Expand All @@ -63,6 +66,7 @@
*/
public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotRequest>
implements IndicesRequest.Replaceable, ToXContentObject {
/** Maximum allowed size, in serialized (JSON) bytes, of the user-supplied snapshot metadata; larger maps fail validation. */
public static final int MAXIMUM_METADATA_BYTES = 1024; // chosen arbitrarily
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Do we test it somewhere?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.


private String snapshot;

Expand All @@ -80,6 +84,8 @@ public class CreateSnapshotRequest extends MasterNodeRequest<CreateSnapshotReque

private boolean waitForCompletion;

private Map<String, Object> userMetadata;

/**
 * Creates a request with no fields set; configure it via the fluent setters before execution.
 */
public CreateSnapshotRequest() {
}

Expand All @@ -104,6 +110,9 @@ public CreateSnapshotRequest(StreamInput in) throws IOException {
includeGlobalState = in.readBoolean();
waitForCompletion = in.readBoolean();
partial = in.readBoolean();
if (in.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) {
userMetadata = in.readMap();
}
}

@Override
Expand All @@ -117,6 +126,9 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(includeGlobalState);
out.writeBoolean(waitForCompletion);
out.writeBoolean(partial);
if (out.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) {
out.writeMap(userMetadata);
}
}

@Override
Expand Down Expand Up @@ -144,9 +156,26 @@ public ActionRequestValidationException validate() {
if (settings == null) {
validationException = addValidationError("settings is null", validationException);
}
if (isMetadataTooBig(userMetadata)) {
validationException = addValidationError("metadata must be smaller than 1024 bytes", validationException);
}
return validationException;
}

private static boolean isMetadataTooBig(Map<String, Object> userMetadata) {
if (userMetadata == null) {
return false;
}
try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
builder.value(userMetadata);
int size = BytesReference.bytes(builder).length();
return size > MAXIMUM_METADATA_BYTES;
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe we could just return the size of the meta here and then do the check upstream? That way we could add the serialized size of the metadata to the exception which might be helpful when debugging things in the future?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I've made this change, it's a good suggestion.

} catch (IOException e) {
// This should not be possible as we are just rendering the xcontent in memory
throw new ElasticsearchException(e);
}
}

/**
* Sets the snapshot name
*
Expand Down Expand Up @@ -378,6 +407,15 @@ public boolean includeGlobalState() {
return includeGlobalState;
}

/**
 * Returns the arbitrary user-supplied metadata attached to this snapshot request,
 * or {@code null} if none was set.
 */
public Map<String, Object> userMetadata() {
return userMetadata;
}

/**
 * Attaches arbitrary user metadata to the snapshot. May be {@code null} to attach none.
 * The serialized size is checked during {@code validate()} against {@code MAXIMUM_METADATA_BYTES}.
 *
 * @return this request, for method chaining
 */
public CreateSnapshotRequest userMetadata(Map<String, Object> userMetadata) {
this.userMetadata = userMetadata;
return this;
}

/**
* Parses snapshot definition.
*
Expand Down Expand Up @@ -405,6 +443,11 @@ public CreateSnapshotRequest source(Map<String, Object> source) {
settings((Map<String, Object>) entry.getValue());
} else if (name.equals("include_global_state")) {
includeGlobalState = nodeBooleanValue(entry.getValue(), "include_global_state");
} else if (name.equals("metadata")) {
if (entry.getValue() != null && (entry.getValue() instanceof Map == false)) {
throw new IllegalArgumentException("malformed metadata, should be an object");
}
userMetadata((Map<String, Object>) entry.getValue());
}
}
indicesOptions(IndicesOptions.fromMap(source, indicesOptions));
Expand Down Expand Up @@ -433,6 +476,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
if (indicesOptions != null) {
indicesOptions.toXContent(builder, params);
}
builder.field("metadata", userMetadata);
builder.endObject();
return builder;
}
Expand Down Expand Up @@ -460,12 +504,14 @@ public boolean equals(Object o) {
Arrays.equals(indices, that.indices) &&
Objects.equals(indicesOptions, that.indicesOptions) &&
Objects.equals(settings, that.settings) &&
Objects.equals(masterNodeTimeout, that.masterNodeTimeout);
Objects.equals(masterNodeTimeout, that.masterNodeTimeout) &&
Objects.equals(userMetadata, that.userMetadata);
}

@Override
public int hashCode() {
int result = Objects.hash(snapshot, repository, indicesOptions, partial, settings, includeGlobalState, waitForCompletion);
int result = Objects.hash(snapshot, repository, indicesOptions, partial, settings, includeGlobalState,
waitForCompletion, userMetadata);
result = 31 * result + Arrays.hashCode(indices);
return result;
}
Expand All @@ -482,6 +528,7 @@ public String toString() {
", includeGlobalState=" + includeGlobalState +
", waitForCompletion=" + waitForCompletion +
", masterNodeTimeout=" + masterNodeTimeout +
", metadata=" + userMetadata +
'}';
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
Expand Down Expand Up @@ -117,4 +118,9 @@ public boolean equals(Object o) {
public int hashCode() {
// Identity is derived from the snapshots list alone, consistent with equals().
return Objects.hash(snapshots);
}

@Override
// Delegates to Strings.toString for a JSON-style rendering of this response's content.
public String toString() {
return Strings.toString(this);
}
}
Loading