Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Remove deprecated S3 settings #24445

Merged
merged 8 commits into from
May 11, 2017
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -70,33 +70,17 @@ public synchronized AmazonS3 client(Settings repositorySettings) {

S3ClientSettings clientSettings = clientsSettings.get(clientName);
if (clientSettings == null) {
throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. " +
"Existing client configs: " +
throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. Existing client configs: " +
Strings.collectionToDelimitedString(clientsSettings.keySet(), ","));
}

// If the user defined a path style access setting, we rely on it,
// otherwise we use the default value set by the SDK
Boolean pathStyleAccess = null;
if (S3Repository.Repository.PATH_STYLE_ACCESS_SETTING.exists(repositorySettings) ||
S3Repository.Repositories.PATH_STYLE_ACCESS_SETTING.exists(settings)) {
pathStyleAccess = getValue(repositorySettings, settings,
S3Repository.Repository.PATH_STYLE_ACCESS_SETTING,
S3Repository.Repositories.PATH_STYLE_ACCESS_SETTING);
}

logger.debug("creating S3 client with client_name [{}], endpoint [{}], path_style_access [{}]",
clientName, clientSettings.endpoint, pathStyleAccess);
logger.debug("creating S3 client with client_name [{}], endpoint [{}]", clientName, clientSettings.endpoint);

AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings);
ClientConfiguration configuration = buildConfiguration(clientSettings, repositorySettings);

client = new AmazonS3Client(credentials, configuration);

if (pathStyleAccess != null) {
client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(pathStyleAccess));
}

if (Strings.hasText(clientSettings.endpoint)) {
client.setEndpoint(clientSettings.endpoint);
}
Expand All @@ -121,14 +105,8 @@ static ClientConfiguration buildConfiguration(S3ClientSettings clientSettings, S
clientConfiguration.setProxyPassword(clientSettings.proxyPassword);
}

Integer maxRetries = getRepoValue(repositorySettings, S3Repository.Repository.MAX_RETRIES_SETTING, clientSettings.maxRetries);
if (maxRetries != null) {
// If not explicitly set, default to 3 with exponential backoff policy
clientConfiguration.setMaxErrorRetry(maxRetries);
}
boolean useThrottleRetries = getRepoValue(repositorySettings,
S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING, clientSettings.throttleRetries);
clientConfiguration.setUseThrottleRetries(useThrottleRetries);
clientConfiguration.setMaxErrorRetry(clientSettings.maxRetries);
clientConfiguration.setUseThrottleRetries(clientSettings.throttleRetries);
clientConfiguration.setSocketTimeout(clientSettings.readTimeoutMillis);

return clientConfiguration;
Expand All @@ -145,14 +123,6 @@ static AWSCredentialsProvider buildCredentials(Logger logger, S3ClientSettings c
}
}

/**
 * Resolves a setting against the per-repository settings, falling back to the
 * supplied default when the repository does not define it explicitly.
 *
 * @param repositorySettings the repository-scoped settings to consult
 * @param repositorySetting  the setting to look up
 * @param fallback           value returned when the setting is absent
 * @return the repository-defined value if present, otherwise {@code fallback}
 */
private static <T> T getRepoValue(Settings repositorySettings, Setting<T> repositorySetting, T fallback) {
    // Single-expression form: an explicit repository value wins over the fallback.
    return repositorySetting.exists(repositorySettings) ? repositorySetting.get(repositorySettings) : fallback;
}

@Override
protected void doStart() throws ElasticsearchException {
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -81,11 +81,11 @@ class S3ClientSettings {

/** The number of retries to use when an s3 request fails. */
static final Setting.AffixSetting<Integer> MAX_RETRIES_SETTING = Setting.affixKeySetting(PREFIX, "max_retries",
key -> Setting.intSetting(key, S3Repository.Repositories.MAX_RETRIES_SETTING, 0, Property.NodeScope));
key -> Setting.intSetting(key, 3, 0, Property.NodeScope));
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wonder if we should default to ClientConfiguration.DEFAULT_RETRY_POLICY.getMaxErrorRetry() instead?
Or to PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY?


/** Whether retries should be throttled (ie use backoff). */
static final Setting.AffixSetting<Boolean> USE_THROTTLE_RETRIES_SETTING = Setting.affixKeySetting(PREFIX, "use_throttle_retries",
key -> Setting.boolSetting(key, S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING, Property.NodeScope));
key -> Setting.boolSetting(key, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.NodeScope));

/** Credentials to authenticate with s3. */
final BasicAWSCredentials credentials;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,149 +58,67 @@ class S3Repository extends BlobStoreRepository {
* NOTE: These are legacy settings. Use the named client config settings above.
*/
public interface Repositories {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not part of this change but you can remove public here. Also for public static final String TYPE = "s3";

/**
* repositories.s3.bucket: The name of the bucket to be used for snapshots.
*/
Setting<String> BUCKET_SETTING = Setting.simpleString("repositories.s3.bucket", Property.NodeScope, Property.Deprecated);
/**
* repositories.s3.server_side_encryption: When set to true files are encrypted on server side using AES256 algorithm.
* Defaults to false.
*/
Setting<Boolean> SERVER_SIDE_ENCRYPTION_SETTING =
Setting.boolSetting("repositories.s3.server_side_encryption", false, Property.NodeScope, Property.Deprecated);

/**
* Default is to use 100MB (S3 defaults) for heaps above 2GB and 5% of
* the available memory for smaller heaps.
*/
ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue(
Math.max(
ByteSizeUnit.MB.toBytes(5), // minimum value
Math.min(
ByteSizeUnit.MB.toBytes(100),
JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() / 20)),
ByteSizeUnit.BYTES);

/**
* repositories.s3.buffer_size: Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold,
* the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of buffer_size length, and
* to upload each part in its own request. Note that setting a buffer size lower than 5mb is not allowed since it will prevents the
* use of the Multipart API and may result in upload errors. Defaults to the minimum between 100MB and 5% of the heap size.
*/
Setting<ByteSizeValue> BUFFER_SIZE_SETTING =
Setting.byteSizeSetting("repositories.s3.buffer_size", DEFAULT_BUFFER_SIZE,
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB), Property.NodeScope, Property.Deprecated);
/**
* repositories.s3.max_retries: Number of retries in case of S3 errors. Defaults to 3.
*/
Setting<Integer> MAX_RETRIES_SETTING = Setting.intSetting("repositories.s3.max_retries", 3, Property.NodeScope, Property.Deprecated);
/**
* repositories.s3.use_throttle_retries: Set to `true` if you want to throttle retries. Defaults to AWS SDK default value (`false`).
*/
Setting<Boolean> USE_THROTTLE_RETRIES_SETTING = Setting.boolSetting("repositories.s3.use_throttle_retries",
ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.NodeScope, Property.Deprecated);
/**
* repositories.s3.chunk_size: Big files can be broken down into chunks during snapshotting if needed. Defaults to 1g.
*/
Setting<ByteSizeValue> CHUNK_SIZE_SETTING =
Setting.byteSizeSetting("repositories.s3.chunk_size", new ByteSizeValue(1, ByteSizeUnit.GB),
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB), Property.NodeScope, Property.Deprecated);
/**
* repositories.s3.compress: When set to true metadata files are stored in compressed format. This setting doesn’t affect index
* files that are already compressed by default. Defaults to false.
*/
Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("repositories.s3.compress", false, Property.NodeScope, Property.Deprecated);
/**
* repositories.s3.storage_class: Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy,
* standard_ia. Defaults to standard.
*/
Setting<String> STORAGE_CLASS_SETTING = Setting.simpleString("repositories.s3.storage_class", Property.NodeScope, Property.Deprecated);
/**
* repositories.s3.canned_acl: The S3 repository supports all S3 canned ACLs : private, public-read, public-read-write,
* authenticated-read, log-delivery-write, bucket-owner-read, bucket-owner-full-control. Defaults to private.
*/
Setting<String> CANNED_ACL_SETTING = Setting.simpleString("repositories.s3.canned_acl", Property.NodeScope, Property.Deprecated);
/**
* repositories.s3.base_path: Specifies the path within bucket to repository data. Defaults to root directory.
*/
Setting<String> BASE_PATH_SETTING = Setting.simpleString("repositories.s3.base_path", Property.NodeScope, Property.Deprecated);
/**
* repositories.s3.path_style_access: When set to true configures the client to use path-style access for all requests.
Amazon S3 supports virtual-hosted-style and path-style access in all Regions. The path-style syntax, however,
requires that you use the region-specific endpoint when attempting to access a bucket.
The default behaviour is to detect which access style to use based on the configured endpoint (an IP will result
in path-style access) and the bucket being accessed (some buckets are not valid DNS names). Setting this flag
will result in path-style access being used for all requests.
*/
Setting<Boolean> PATH_STYLE_ACCESS_SETTING = Setting.boolSetting("repositories.s3.path_style_access", false,
Property.NodeScope, Property.Deprecated);

}

/**
* Per S3 repository specific settings. Same settings as Repositories settings but without the repositories.s3 prefix.
* If undefined, they use the repositories.s3.xxx equivalent setting.
* Default is to use 100MB (S3 defaults) for heaps above 2GB and 5% of
* the available memory for smaller heaps.
*/
public interface Repository {

Setting<String> BUCKET_SETTING = Setting.simpleString("bucket");

/**
* server_side_encryption
* @see Repositories#SERVER_SIDE_ENCRYPTION_SETTING
*/
Setting<Boolean> SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("server_side_encryption", false);

/**
* buffer_size
* @see Repositories#BUFFER_SIZE_SETTING
*/
Setting<ByteSizeValue> BUFFER_SIZE_SETTING =
Setting.byteSizeSetting("buffer_size", Repositories.DEFAULT_BUFFER_SIZE,
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB));
/**
* max_retries
* @see Repositories#MAX_RETRIES_SETTING
*/
Setting<Integer> MAX_RETRIES_SETTING = Setting.intSetting("max_retries", 3, Property.Deprecated);
/**
* use_throttle_retries
* @see Repositories#USE_THROTTLE_RETRIES_SETTING
*/
Setting<Boolean> USE_THROTTLE_RETRIES_SETTING = Setting.boolSetting("use_throttle_retries",
ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.Deprecated);
/**
* chunk_size
* @see Repositories#CHUNK_SIZE_SETTING
*/
Setting<ByteSizeValue> CHUNK_SIZE_SETTING =
Setting.byteSizeSetting("chunk_size", new ByteSizeValue(1, ByteSizeUnit.GB),
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB));
/**
* compress
* @see Repositories#COMPRESS_SETTING
*/
Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("compress", false);
/**
* storage_class
* @see Repositories#STORAGE_CLASS_SETTING
*/
Setting<String> STORAGE_CLASS_SETTING = Setting.simpleString("storage_class");
/**
* canned_acl
* @see Repositories#CANNED_ACL_SETTING
*/
Setting<String> CANNED_ACL_SETTING = Setting.simpleString("canned_acl");
/**
* base_path
* @see Repositories#BASE_PATH_SETTING
*/
Setting<String> BASE_PATH_SETTING = Setting.simpleString("base_path");
/**
* path_style_access
* @see Repositories#PATH_STYLE_ACCESS_SETTING
*/
Setting<Boolean> PATH_STYLE_ACCESS_SETTING = Setting.boolSetting("path_style_access", false, Property.Deprecated);
}
static final ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue(
Math.max(
ByteSizeUnit.MB.toBytes(5), // minimum value
Math.min(
ByteSizeUnit.MB.toBytes(100),
JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() / 20)),
ByteSizeUnit.BYTES);


static final Setting<String> BUCKET_SETTING = Setting.simpleString("bucket");

/**
* When set to true files are encrypted on server side using AES256 algorithm.
* Defaults to false.
*/
static final Setting<Boolean> SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("server_side_encryption", false);

/**
* Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold,
* the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of buffer_size length, and
* to upload each part in its own request. Note that setting a buffer size lower than 5mb is not allowed since it will prevents the
* use of the Multipart API and may result in upload errors. Defaults to the minimum between 100MB and 5% of the heap size.
*/
static final Setting<ByteSizeValue> BUFFER_SIZE_SETTING = Setting.byteSizeSetting("buffer_size", DEFAULT_BUFFER_SIZE,
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB));

/**
* Big files can be broken down into chunks during snapshotting if needed. Defaults to 1g.
*/
static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", new ByteSizeValue(1, ByteSizeUnit.GB),
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB));

/**
* When set to true metadata files are stored in compressed format. This setting doesn’t affect index
* files that are already compressed by default. Defaults to false.
*/
static final Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("compress", false);

/**
* Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy,
* standard_ia. Defaults to standard.
*/
static final Setting<String> STORAGE_CLASS_SETTING = Setting.simpleString("storage_class");

/**
* The S3 repository supports all S3 canned ACLs : private, public-read, public-read-write,
* authenticated-read, log-delivery-write, bucket-owner-read, bucket-owner-full-control. Defaults to private.
*/
static final Setting<String> CANNED_ACL_SETTING = Setting.simpleString("canned_acl");

/**
* Specifies the path within bucket to repository data. Defaults to root directory.
*/
static final Setting<String> BASE_PATH_SETTING = Setting.simpleString("base_path");

private final S3BlobStore blobStore;

Expand All @@ -217,25 +135,25 @@ public interface Repository {
NamedXContentRegistry namedXContentRegistry, AwsS3Service s3Service) throws IOException {
super(metadata, settings, namedXContentRegistry);

String bucket = getValue(metadata.settings(), settings, Repository.BUCKET_SETTING, Repositories.BUCKET_SETTING);
String bucket = BUCKET_SETTING.get(metadata.settings());
if (bucket == null) {
throw new RepositoryException(metadata.name(), "No bucket defined for s3 gateway");
}

boolean serverSideEncryption = getValue(metadata.settings(), settings, Repository.SERVER_SIDE_ENCRYPTION_SETTING, Repositories.SERVER_SIDE_ENCRYPTION_SETTING);
ByteSizeValue bufferSize = getValue(metadata.settings(), settings, Repository.BUFFER_SIZE_SETTING, Repositories.BUFFER_SIZE_SETTING);
this.chunkSize = getValue(metadata.settings(), settings, Repository.CHUNK_SIZE_SETTING, Repositories.CHUNK_SIZE_SETTING);
this.compress = getValue(metadata.settings(), settings, Repository.COMPRESS_SETTING, Repositories.COMPRESS_SETTING);
boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings());
ByteSizeValue bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings());
this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings());
this.compress = COMPRESS_SETTING.get(metadata.settings());

// We make sure that chunkSize is bigger or equal than/to bufferSize
if (this.chunkSize.getBytes() < bufferSize.getBytes()) {
throw new RepositoryException(metadata.name(), Repository.CHUNK_SIZE_SETTING.getKey() + " (" + this.chunkSize +
") can't be lower than " + Repository.BUFFER_SIZE_SETTING.getKey() + " (" + bufferSize + ").");
throw new RepositoryException(metadata.name(), CHUNK_SIZE_SETTING.getKey() + " (" + this.chunkSize +
") can't be lower than " + BUFFER_SIZE_SETTING.getKey() + " (" + bufferSize + ").");
}

// Parse and validate the user's S3 Storage Class setting
String storageClass = getValue(metadata.settings(), settings, Repository.STORAGE_CLASS_SETTING, Repositories.STORAGE_CLASS_SETTING);
String cannedACL = getValue(metadata.settings(), settings, Repository.CANNED_ACL_SETTING, Repositories.CANNED_ACL_SETTING);
String storageClass = STORAGE_CLASS_SETTING.get(metadata.settings());
String cannedACL = CANNED_ACL_SETTING.get(metadata.settings());

logger.debug("using bucket [{}], chunk_size [{}], server_side_encryption [{}], " +
"buffer_size [{}], cannedACL [{}], storageClass [{}]",
Expand All @@ -244,13 +162,8 @@ public interface Repository {
AmazonS3 client = s3Service.client(metadata.settings());
blobStore = new S3BlobStore(settings, client, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);

String basePath = getValue(metadata.settings(), settings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING);
String basePath = BASE_PATH_SETTING.get(metadata.settings());
if (Strings.hasLength(basePath)) {
if (basePath.startsWith("/")) {
basePath = basePath.substring(1);
deprecationLogger.deprecated("S3 repository base_path trimming the leading `/`, and " +
"leading `/` will not be supported for the S3 repository in future releases");
}
this.basePath = new BlobPath().add(basePath);
} else {
this.basePath = BlobPath.cleanPath();
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not related to this change but method public static <T> T getValue(...) at the end of this class can become package private.

Expand Down
Loading