Skip to content

Commit

Permalink
Include size of snapshot in snapshot metadata (#29602)
Browse files Browse the repository at this point in the history
Adds the difference in the number of files (and their sizes) between the previous and current snapshot. The total number/size reflects the total number/size of files in the snapshot.

Closes #18543

(cherry picked from commit 81eb8ba)
  • Loading branch information
Vladimir Dolzhenko committed May 25, 2018
1 parent f561bad commit b006f3b
Show file tree
Hide file tree
Showing 13 changed files with 512 additions and 120 deletions.
13 changes: 13 additions & 0 deletions docs/reference/migration/migrate_7_0/snapshotstats.asciidoc
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
[[breaking_70_snapshotstats_changes]]
=== Snapshot stats changes

Snapshot stats details are provided in a new structured way:

* `total` section for all the files that are referenced by the snapshot.
* `incremental` section for those files that actually needed to be copied over as part of the incremental snapshotting.
* In case of a snapshot that's still in progress, there's also a `processed` section for files that are in the process of being copied.

==== Deprecated `number_of_files`, `processed_files`, `total_size_in_bytes` and `processed_size_in_bytes` snapshot stats properties have been removed

* Properties `number_of_files` and `total_size_in_bytes` have been removed; use the values of the nested `total` object instead.
* Properties `processed_files` and `processed_size_in_bytes` have been removed; use the values of the nested `processed` object instead.
56 changes: 56 additions & 0 deletions docs/reference/modules/snapshots.asciidoc
Original file line number Diff line number Diff line change
Expand Up @@ -563,6 +563,62 @@ GET /_snapshot/my_backup/snapshot_1/_status
// CONSOLE
// TEST[continued]

The output looks similar to the following:

[source,js]
--------------------------------------------------
{
"snapshots": [
{
"snapshot": "snapshot_1",
"repository": "my_backup",
"uuid": "XuBo4l4ISYiVg0nYUen9zg",
"state": "SUCCESS",
"include_global_state": true,
"shards_stats": {
"initializing": 0,
"started": 0,
"finalizing": 0,
"done": 5,
"failed": 0,
"total": 5
},
"stats": {
"incremental": {
"file_count": 8,
"size_in_bytes": 4704
},
"processed": {
"file_count": 7,
"size_in_bytes": 4254
},
"total": {
"file_count": 8,
"size_in_bytes": 4704
},
"start_time_in_millis": 1526280280355,
"time_in_millis": 358,
"number_of_files": 8,
"processed_files": 8,
"total_size_in_bytes": 4704,
"processed_size_in_bytes": 4704
}
}
]
}
--------------------------------------------------
// TESTRESPONSE

The output is composed of different sections. The `stats` sub-object provides details on the number and size of files that were
snapshotted. As snapshots are incremental, copying only the Lucene segments that are not already in the repository,
the `stats` object contains a `total` section for all the files that are referenced by the snapshot, as well as an `incremental` section
for those files that actually needed to be copied over as part of the incremental snapshotting. In case of a snapshot that's still
in progress, there's also a `processed` section that contains information about the files that are in the process of being copied.

_Note_: Properties `number_of_files`, `processed_files`, `total_size_in_bytes` and `processed_size_in_bytes` are used for
backward compatibility reasons with older 5.x and 6.x versions. These fields will be removed in Elasticsearch v7.0.0.

Multiple ids are also supported:

[source,sh]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,9 @@ setup:

---
"Get snapshot status":

- skip:
version: " - 6.99.99"
reason: "backporting in progress: https://github.com/elastic/elasticsearch/pull/29602"
- do:
indices.create:
index: test_index
Expand All @@ -32,6 +34,42 @@ setup:
snapshot: test_snapshot

- is_true: snapshots
- match: { snapshots.0.snapshot: test_snapshot }
- match: { snapshots.0.state: SUCCESS }
- gt: { snapshots.0.stats.incremental.file_count: 0 }
- gt: { snapshots.0.stats.incremental.size_in_bytes: 0 }
- gt: { snapshots.0.stats.total.file_count: 0 }
- is_true: snapshots.0.stats.start_time_in_millis
- is_true: snapshots.0.stats.time_in_millis

---
"Get snapshot status with BWC fields":
- do:
indices.create:
index: test_index
body:
settings:
number_of_shards: 1
number_of_replicas: 0

- do:
snapshot.create:
repository: test_repo_status_1
snapshot: test_snapshot_bwc
wait_for_completion: true

- do:
snapshot.status:
repository: test_repo_status_1
snapshot: test_snapshot_bwc

- is_true: snapshots
- match: { snapshots.0.snapshot: test_snapshot_bwc }
- match: { snapshots.0.state: SUCCESS }
- gt: { snapshots.0.stats.number_of_files: 0 }
- gt: { snapshots.0.stats.processed_files: 0 }
- gt: { snapshots.0.stats.total_size_in_bytes: 0 }
- gt: { snapshots.0.stats.processed_size_in_bytes: 0 }

---
"Get missing snapshot status throws an exception":
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -74,8 +74,8 @@ private SnapshotIndexShardStatus() {
throw new IllegalArgumentException("Unknown stage type " + indexShardStatus.getStage());
}
this.stats = new SnapshotStats(indexShardStatus.getStartTime(), indexShardStatus.getTotalTime(),
indexShardStatus.getNumberOfFiles(), indexShardStatus.getProcessedFiles(),
indexShardStatus.getTotalSize(), indexShardStatus.getProcessedSize());
indexShardStatus.getIncrementalFileCount(), indexShardStatus.getTotalFileCount(), indexShardStatus.getProcessedFileCount(),
indexShardStatus.getIncrementalSize(), indexShardStatus.getTotalSize(), indexShardStatus.getProcessedSize());
this.failure = indexShardStatus.getFailure();
this.nodeId = nodeId;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.cluster.snapshots.status;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
Expand All @@ -34,19 +35,25 @@ public class SnapshotStats implements Streamable, ToXContentFragment {

private long startTime;
private long time;
private int numberOfFiles;
private int processedFiles;
private int incrementalFileCount;
private int totalFileCount;
private int processedFileCount;
private long incrementalSize;
private long totalSize;
private long processedSize;

SnapshotStats() {
}

SnapshotStats(long startTime, long time, int numberOfFiles, int processedFiles, long totalSize, long processedSize) {
SnapshotStats(long startTime, long time,
int incrementalFileCount, int totalFileCount, int processedFileCount,
long incrementalSize, long totalSize, long processedSize) {
this.startTime = startTime;
this.time = time;
this.numberOfFiles = numberOfFiles;
this.processedFiles = processedFiles;
this.incrementalFileCount = incrementalFileCount;
this.totalFileCount = totalFileCount;
this.processedFileCount = processedFileCount;
this.incrementalSize = incrementalSize;
this.totalSize = totalSize;
this.processedSize = processedSize;
}
Expand All @@ -66,17 +73,31 @@ public long getTime() {
}

/**
* Returns number of files in the snapshot
* Returns incremental file count of the snapshot
*/
public int getNumberOfFiles() {
return numberOfFiles;
public int getIncrementalFileCount() {
return incrementalFileCount;
}

/**
* Returns total number of files in the snapshot
*/
public int getTotalFileCount() {
return totalFileCount;
}

/**
* Returns number of files in the snapshot that were processed so far
*/
public int getProcessedFiles() {
return processedFiles;
public int getProcessedFileCount() {
return processedFileCount;
}

/**
* Return incremental files size of the snapshot
*/
public long getIncrementalSize() {
return incrementalSize;
}

/**
Expand Down Expand Up @@ -105,59 +126,109 @@ public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(startTime);
out.writeVLong(time);

out.writeVInt(numberOfFiles);
out.writeVInt(processedFiles);
out.writeVInt(incrementalFileCount);
out.writeVInt(processedFileCount);

out.writeVLong(totalSize);
out.writeVLong(incrementalSize);
out.writeVLong(processedSize);

if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
out.writeVInt(totalFileCount);
out.writeVLong(totalSize);
}
}

@Override
public void readFrom(StreamInput in) throws IOException {
startTime = in.readVLong();
time = in.readVLong();

numberOfFiles = in.readVInt();
processedFiles = in.readVInt();
incrementalFileCount = in.readVInt();
processedFileCount = in.readVInt();

totalSize = in.readVLong();
incrementalSize = in.readVLong();
processedSize = in.readVLong();

if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
totalFileCount = in.readVInt();
totalSize = in.readVLong();
} else {
totalFileCount = incrementalFileCount;
totalSize = incrementalSize;
}
}

static final class Fields {
static final String STATS = "stats";

static final String INCREMENTAL = "incremental";
static final String PROCESSED = "processed";
static final String TOTAL = "total";

static final String FILE_COUNT = "file_count";
static final String SIZE = "size";
static final String SIZE_IN_BYTES = "size_in_bytes";

static final String START_TIME_IN_MILLIS = "start_time_in_millis";
static final String TIME_IN_MILLIS = "time_in_millis";
static final String TIME = "time";

// BWC
static final String NUMBER_OF_FILES = "number_of_files";
static final String PROCESSED_FILES = "processed_files";
static final String TOTAL_SIZE_IN_BYTES = "total_size_in_bytes";
static final String TOTAL_SIZE = "total_size";
static final String TOTAL_SIZE_IN_BYTES = "total_size_in_bytes";
static final String PROCESSED_SIZE_IN_BYTES = "processed_size_in_bytes";
static final String PROCESSED_SIZE = "processed_size";
static final String START_TIME_IN_MILLIS = "start_time_in_millis";
static final String TIME_IN_MILLIS = "time_in_millis";
static final String TIME = "time";

}

@Override
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(Fields.STATS);
builder.field(Fields.NUMBER_OF_FILES, getNumberOfFiles());
builder.field(Fields.PROCESSED_FILES, getProcessedFiles());
builder.humanReadableField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, new ByteSizeValue(getTotalSize()));
builder.humanReadableField(Fields.PROCESSED_SIZE_IN_BYTES, Fields.PROCESSED_SIZE, new ByteSizeValue(getProcessedSize()));
builder.field(Fields.START_TIME_IN_MILLIS, getStartTime());
builder.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, new TimeValue(getTime()));
builder.endObject();
return builder;
builder.startObject(Fields.STATS)
// incremental starts
.startObject(Fields.INCREMENTAL)
.field(Fields.FILE_COUNT, getIncrementalFileCount())
.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getIncrementalSize()))
// incremental ends
.endObject();

if (getProcessedFileCount() != getIncrementalFileCount()) {
// processed starts
builder.startObject(Fields.PROCESSED)
.field(Fields.FILE_COUNT, getProcessedFileCount())
.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getProcessedSize()))
// processed ends
.endObject();
}
// total starts
builder.startObject(Fields.TOTAL)
.field(Fields.FILE_COUNT, getTotalFileCount())
.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getTotalSize()))
// total ends
.endObject();
// timings stats
builder.field(Fields.START_TIME_IN_MILLIS, getStartTime())
.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, new TimeValue(getTime()));

// BWC part
return builder.field(Fields.NUMBER_OF_FILES, getIncrementalFileCount())
.field(Fields.PROCESSED_FILES, getProcessedFileCount())
.humanReadableField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, new ByteSizeValue(getIncrementalSize()))
.humanReadableField(Fields.PROCESSED_SIZE_IN_BYTES, Fields.PROCESSED_SIZE, new ByteSizeValue(getProcessedSize()))
// BWC part ends
.endObject();
}

void add(SnapshotStats stats) {
numberOfFiles += stats.numberOfFiles;
processedFiles += stats.processedFiles;
incrementalFileCount += stats.incrementalFileCount;
totalFileCount += stats.totalFileCount;
processedFileCount += stats.processedFileCount;

incrementalSize += stats.incrementalSize;
totalSize += stats.totalSize;
processedSize += stats.processedSize;


if (startTime == 0) {
// First time here
startTime = stats.startTime;
Expand Down
Loading

0 comments on commit b006f3b

Please sign in to comment.