-
Notifications
You must be signed in to change notification settings - Fork 25k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Remove deprecated S3 settings #24445
Changes from 1 commit
1761524
5a15dae
70dea73
aa8d020
b39b418
0b31d2b
e9db3d6
04c708f
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -58,149 +58,67 @@ class S3Repository extends BlobStoreRepository { | |
* NOTE: These are legacy settings. Use the named client config settings above. | ||
*/ | ||
public interface Repositories { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Not part of this change but you can remove |
||
/** | ||
* repositories.s3.bucket: The name of the bucket to be used for snapshots. | ||
*/ | ||
Setting<String> BUCKET_SETTING = Setting.simpleString("repositories.s3.bucket", Property.NodeScope, Property.Deprecated); | ||
/** | ||
* repositories.s3.server_side_encryption: When set to true files are encrypted on server side using AES256 algorithm. | ||
* Defaults to false. | ||
*/ | ||
Setting<Boolean> SERVER_SIDE_ENCRYPTION_SETTING = | ||
Setting.boolSetting("repositories.s3.server_side_encryption", false, Property.NodeScope, Property.Deprecated); | ||
|
||
/** | ||
* Default is to use 100MB (S3 defaults) for heaps above 2GB and 5% of | ||
* the available memory for smaller heaps. | ||
*/ | ||
ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue( | ||
Math.max( | ||
ByteSizeUnit.MB.toBytes(5), // minimum value | ||
Math.min( | ||
ByteSizeUnit.MB.toBytes(100), | ||
JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() / 20)), | ||
ByteSizeUnit.BYTES); | ||
|
||
/** | ||
* repositories.s3.buffer_size: Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold, | ||
* the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of buffer_size length, and | ||
* to upload each part in its own request. Note that setting a buffer size lower than 5mb is not allowed since it will prevent the | ||
* use of the Multipart API and may result in upload errors. Defaults to the minimum between 100MB and 5% of the heap size. | ||
*/ | ||
Setting<ByteSizeValue> BUFFER_SIZE_SETTING = | ||
Setting.byteSizeSetting("repositories.s3.buffer_size", DEFAULT_BUFFER_SIZE, | ||
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB), Property.NodeScope, Property.Deprecated); | ||
/** | ||
* repositories.s3.max_retries: Number of retries in case of S3 errors. Defaults to 3. | ||
*/ | ||
Setting<Integer> MAX_RETRIES_SETTING = Setting.intSetting("repositories.s3.max_retries", 3, Property.NodeScope, Property.Deprecated); | ||
/** | ||
* repositories.s3.use_throttle_retries: Set to `true` if you want to throttle retries. Defaults to AWS SDK default value (`false`). | ||
*/ | ||
Setting<Boolean> USE_THROTTLE_RETRIES_SETTING = Setting.boolSetting("repositories.s3.use_throttle_retries", | ||
ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.NodeScope, Property.Deprecated); | ||
/** | ||
* repositories.s3.chunk_size: Big files can be broken down into chunks during snapshotting if needed. Defaults to 1g. | ||
*/ | ||
Setting<ByteSizeValue> CHUNK_SIZE_SETTING = | ||
Setting.byteSizeSetting("repositories.s3.chunk_size", new ByteSizeValue(1, ByteSizeUnit.GB), | ||
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB), Property.NodeScope, Property.Deprecated); | ||
/** | ||
* repositories.s3.compress: When set to true metadata files are stored in compressed format. This setting doesn’t affect index | ||
* files that are already compressed by default. Defaults to false. | ||
*/ | ||
Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("repositories.s3.compress", false, Property.NodeScope, Property.Deprecated); | ||
/** | ||
* repositories.s3.storage_class: Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy, | ||
* standard_ia. Defaults to standard. | ||
*/ | ||
Setting<String> STORAGE_CLASS_SETTING = Setting.simpleString("repositories.s3.storage_class", Property.NodeScope, Property.Deprecated); | ||
/** | ||
* repositories.s3.canned_acl: The S3 repository supports all S3 canned ACLs : private, public-read, public-read-write, | ||
* authenticated-read, log-delivery-write, bucket-owner-read, bucket-owner-full-control. Defaults to private. | ||
*/ | ||
Setting<String> CANNED_ACL_SETTING = Setting.simpleString("repositories.s3.canned_acl", Property.NodeScope, Property.Deprecated); | ||
/** | ||
* repositories.s3.base_path: Specifies the path within bucket to repository data. Defaults to root directory. | ||
*/ | ||
Setting<String> BASE_PATH_SETTING = Setting.simpleString("repositories.s3.base_path", Property.NodeScope, Property.Deprecated); | ||
/** | ||
* repositories.s3.path_style_access: When set to true configures the client to use path-style access for all requests. | ||
Amazon S3 supports virtual-hosted-style and path-style access in all Regions. The path-style syntax, however, | ||
requires that you use the region-specific endpoint when attempting to access a bucket. | ||
The default behaviour is to detect which access style to use based on the configured endpoint (an IP will result | ||
in path-style access) and the bucket being accessed (some buckets are not valid DNS names). Setting this flag | ||
will result in path-style access being used for all requests. | ||
*/ | ||
Setting<Boolean> PATH_STYLE_ACCESS_SETTING = Setting.boolSetting("repositories.s3.path_style_access", false, | ||
Property.NodeScope, Property.Deprecated); | ||
|
||
} | ||
|
||
/** | ||
* Per S3 repository specific settings. Same settings as Repositories settings but without the repositories.s3 prefix. | ||
* If undefined, they use the repositories.s3.xxx equivalent setting. | ||
* Default is to use 100MB (S3 defaults) for heaps above 2GB and 5% of | ||
* the available memory for smaller heaps. | ||
*/ | ||
public interface Repository { | ||
|
||
Setting<String> BUCKET_SETTING = Setting.simpleString("bucket"); | ||
|
||
/** | ||
* server_side_encryption | ||
* @see Repositories#SERVER_SIDE_ENCRYPTION_SETTING | ||
*/ | ||
Setting<Boolean> SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("server_side_encryption", false); | ||
|
||
/** | ||
* buffer_size | ||
* @see Repositories#BUFFER_SIZE_SETTING | ||
*/ | ||
Setting<ByteSizeValue> BUFFER_SIZE_SETTING = | ||
Setting.byteSizeSetting("buffer_size", Repositories.DEFAULT_BUFFER_SIZE, | ||
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB)); | ||
/** | ||
* max_retries | ||
* @see Repositories#MAX_RETRIES_SETTING | ||
*/ | ||
Setting<Integer> MAX_RETRIES_SETTING = Setting.intSetting("max_retries", 3, Property.Deprecated); | ||
/** | ||
* use_throttle_retries | ||
* @see Repositories#USE_THROTTLE_RETRIES_SETTING | ||
*/ | ||
Setting<Boolean> USE_THROTTLE_RETRIES_SETTING = Setting.boolSetting("use_throttle_retries", | ||
ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.Deprecated); | ||
/** | ||
* chunk_size | ||
* @see Repositories#CHUNK_SIZE_SETTING | ||
*/ | ||
Setting<ByteSizeValue> CHUNK_SIZE_SETTING = | ||
Setting.byteSizeSetting("chunk_size", new ByteSizeValue(1, ByteSizeUnit.GB), | ||
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB)); | ||
/** | ||
* compress | ||
* @see Repositories#COMPRESS_SETTING | ||
*/ | ||
Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("compress", false); | ||
/** | ||
* storage_class | ||
* @see Repositories#STORAGE_CLASS_SETTING | ||
*/ | ||
Setting<String> STORAGE_CLASS_SETTING = Setting.simpleString("storage_class"); | ||
/** | ||
* canned_acl | ||
* @see Repositories#CANNED_ACL_SETTING | ||
*/ | ||
Setting<String> CANNED_ACL_SETTING = Setting.simpleString("canned_acl"); | ||
/** | ||
* base_path | ||
* @see Repositories#BASE_PATH_SETTING | ||
*/ | ||
Setting<String> BASE_PATH_SETTING = Setting.simpleString("base_path"); | ||
/** | ||
* path_style_access | ||
* @see Repositories#PATH_STYLE_ACCESS_SETTING | ||
*/ | ||
Setting<Boolean> PATH_STYLE_ACCESS_SETTING = Setting.boolSetting("path_style_access", false, Property.Deprecated); | ||
} | ||
static final ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue( | ||
Math.max( | ||
ByteSizeUnit.MB.toBytes(5), // minimum value | ||
Math.min( | ||
ByteSizeUnit.MB.toBytes(100), | ||
JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() / 20)), | ||
ByteSizeUnit.BYTES); | ||
|
||
|
||
static final Setting<String> BUCKET_SETTING = Setting.simpleString("bucket"); | ||
|
||
/** | ||
* When set to true files are encrypted on server side using AES256 algorithm. | ||
* Defaults to false. | ||
*/ | ||
static final Setting<Boolean> SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("server_side_encryption", false); | ||
|
||
/** | ||
* Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold, | ||
* the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of buffer_size length, and | ||
* to upload each part in its own request. Note that setting a buffer size lower than 5mb is not allowed since it will prevent the | ||
* use of the Multipart API and may result in upload errors. Defaults to the minimum between 100MB and 5% of the heap size. | ||
*/ | ||
static final Setting<ByteSizeValue> BUFFER_SIZE_SETTING = Setting.byteSizeSetting("buffer_size", DEFAULT_BUFFER_SIZE, | ||
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB)); | ||
|
||
/** | ||
* Big files can be broken down into chunks during snapshotting if needed. Defaults to 1g. | ||
*/ | ||
static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", new ByteSizeValue(1, ByteSizeUnit.GB), | ||
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB)); | ||
|
||
/** | ||
* When set to true metadata files are stored in compressed format. This setting doesn’t affect index | ||
* files that are already compressed by default. Defaults to false. | ||
*/ | ||
static final Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("compress", false); | ||
|
||
/** | ||
* Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy, | ||
* standard_ia. Defaults to standard. | ||
*/ | ||
static final Setting<String> STORAGE_CLASS_SETTING = Setting.simpleString("storage_class"); | ||
|
||
/** | ||
* The S3 repository supports all S3 canned ACLs : private, public-read, public-read-write, | ||
* authenticated-read, log-delivery-write, bucket-owner-read, bucket-owner-full-control. Defaults to private. | ||
*/ | ||
static final Setting<String> CANNED_ACL_SETTING = Setting.simpleString("canned_acl"); | ||
|
||
/** | ||
* Specifies the path within bucket to repository data. Defaults to root directory. | ||
*/ | ||
static final Setting<String> BASE_PATH_SETTING = Setting.simpleString("base_path"); | ||
|
||
private final S3BlobStore blobStore; | ||
|
||
|
@@ -217,25 +135,25 @@ public interface Repository { | |
NamedXContentRegistry namedXContentRegistry, AwsS3Service s3Service) throws IOException { | ||
super(metadata, settings, namedXContentRegistry); | ||
|
||
String bucket = getValue(metadata.settings(), settings, Repository.BUCKET_SETTING, Repositories.BUCKET_SETTING); | ||
String bucket = BUCKET_SETTING.get(metadata.settings()); | ||
if (bucket == null) { | ||
throw new RepositoryException(metadata.name(), "No bucket defined for s3 gateway"); | ||
} | ||
|
||
boolean serverSideEncryption = getValue(metadata.settings(), settings, Repository.SERVER_SIDE_ENCRYPTION_SETTING, Repositories.SERVER_SIDE_ENCRYPTION_SETTING); | ||
ByteSizeValue bufferSize = getValue(metadata.settings(), settings, Repository.BUFFER_SIZE_SETTING, Repositories.BUFFER_SIZE_SETTING); | ||
this.chunkSize = getValue(metadata.settings(), settings, Repository.CHUNK_SIZE_SETTING, Repositories.CHUNK_SIZE_SETTING); | ||
this.compress = getValue(metadata.settings(), settings, Repository.COMPRESS_SETTING, Repositories.COMPRESS_SETTING); | ||
boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings()); | ||
ByteSizeValue bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings()); | ||
this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings()); | ||
this.compress = COMPRESS_SETTING.get(metadata.settings()); | ||
|
||
// We make sure that chunkSize is bigger or equal than/to bufferSize | ||
if (this.chunkSize.getBytes() < bufferSize.getBytes()) { | ||
throw new RepositoryException(metadata.name(), Repository.CHUNK_SIZE_SETTING.getKey() + " (" + this.chunkSize + | ||
") can't be lower than " + Repository.BUFFER_SIZE_SETTING.getKey() + " (" + bufferSize + ")."); | ||
throw new RepositoryException(metadata.name(), CHUNK_SIZE_SETTING.getKey() + " (" + this.chunkSize + | ||
") can't be lower than " + BUFFER_SIZE_SETTING.getKey() + " (" + bufferSize + ")."); | ||
} | ||
|
||
// Parse and validate the user's S3 Storage Class setting | ||
String storageClass = getValue(metadata.settings(), settings, Repository.STORAGE_CLASS_SETTING, Repositories.STORAGE_CLASS_SETTING); | ||
String cannedACL = getValue(metadata.settings(), settings, Repository.CANNED_ACL_SETTING, Repositories.CANNED_ACL_SETTING); | ||
String storageClass = STORAGE_CLASS_SETTING.get(metadata.settings()); | ||
String cannedACL = CANNED_ACL_SETTING.get(metadata.settings()); | ||
|
||
logger.debug("using bucket [{}], chunk_size [{}], server_side_encryption [{}], " + | ||
"buffer_size [{}], cannedACL [{}], storageClass [{}]", | ||
|
@@ -244,13 +162,8 @@ public interface Repository { | |
AmazonS3 client = s3Service.client(metadata.settings()); | ||
blobStore = new S3BlobStore(settings, client, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); | ||
|
||
String basePath = getValue(metadata.settings(), settings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING); | ||
String basePath = BASE_PATH_SETTING.get(metadata.settings()); | ||
if (Strings.hasLength(basePath)) { | ||
if (basePath.startsWith("/")) { | ||
basePath = basePath.substring(1); | ||
deprecationLogger.deprecated("S3 repository base_path trimming the leading `/`, and " + | ||
"leading `/` will not be supported for the S3 repository in future releases"); | ||
} | ||
this.basePath = new BlobPath().add(basePath); | ||
} else { | ||
this.basePath = BlobPath.cleanPath(); | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Not related to this change but method |
||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I wonder if we should default to
ClientConfiguration.DEFAULT_RETRY_POLICY.getMaxErrorRetry()
instead? Or to
PredefinedRetryPolicies.DEFAULT_MAX_ERROR_RETRY
?