config.json (forked from awslabs/amazon-redshift-utils)
// Example unload/copy utility config file. Remove all '//' comments before use, as they are not valid JSON (a comment-stripping sketch follows the file).
{
// the source database from which we'll export data (see the UNLOAD sketch after this file)
"unloadSource": {
"clusterEndpoint": "my-cluster.d7bdmd4addft.eu-west-1.redshift.amazonaws.com",
"clusterPort": 5439,
// base64-encoded password for the user to UNLOAD data as. Use the encryptValue.sh utility to generate this string
"connectPwd": "my base 64 encoded password",
"connectUser": "master",
"db": "mydb",
"schemaName": "public",
"tableName": "export_table",
// Optional list of columns to appear in the SELECT statement, e.g. if the table has auto-generated IDs, list only the other columns here.
// You can use any SQL or Redshift built-in functions that would be allowed in an UNLOAD statement
"columns" : "column1,column2"
},
// location and credentials for S3, which are used to store migrated data while in flight
"s3Staging": {
// Either use an AWS IAM role or specify an access key / secret key pair for S3 access (see the credentials sketch after this file). Read http://docs.aws.amazon.com/redshift/latest/mgmt/copy-unload-iam-role.html
// AWS IAM role to use. If a role is specified, the access keys are ignored
"aws_iam_role": "aws iam role which is assigned to Redshift and has access to the s3 bucket",
// base64-encoded AWS Access Key used to access S3. Use the encryptValue.sh utility to generate this string
"aws_access_key_id": "my base 64 encoded access key",
// base64-encoded AWS Secret Access Key used to access S3. Use the encryptValue.sh utility to generate this string
"aws_secret_access_key": "my base 64 encoded secret key",
// path on S3 to use for storing in-flight data. The current date and time are appended to the prefix
"path": "s3://my-bucket/prefix/",
"deleteOnSuccess": "True",
// region to use for the S3 export
"region": "us-east-1"
},
// the destination database into which we will copy data (see the COPY sketch after this file)
"copyTarget": {
"clusterEndpoint": "my-other-cluster.d7bdmd4addft.eu-west-1.redshift.amazonaws.com",
"clusterPort": 5439,
// base64-encoded password for the user to COPY data as. Use the encryptValue.sh utility to generate this string
"connectPwd": "my base 64 encoded password",
"connectUser": "master",
"db": "mydb",
"schemaName": "public",
"tableName": "import_table",
// Optional list of columns, best used in conjunction with unloadSource.columns unless the schemas differ and you know what you are doing
"columns" : "column1,column2",
// Optional boolean indicating whether the COPY command should use EXPLICIT_IDS:
// https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-data-conversion.html#copy-explicit-ids
"explicit_ids": true
}
}
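
The '//' comments above make the file invalid JSON until they are removed. A minimal sketch of one way to strip them, assuming the file is saved locally as config.json; it drops only lines that start with '//', so values containing '//' (such as the s3:// path) are left intact:

    import json

    # keep every line that is not a full-line '//' comment
    with open("config.json") as f:
        valid_json = "".join(line for line in f if not line.lstrip().startswith("//"))

    config = json.loads(valid_json)
    print(config["unloadSource"]["tableName"])  # -> export_table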
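The utility builds its SQL internally, so purely as an illustration of how the unloadSource and s3Staging fields fit together, here is a hypothetical sketch of the UNLOAD statement they describe, reusing the config dict parsed above:

    from datetime import datetime, timezone

    src, s3 = config["unloadSource"], config["s3Staging"]

    # per the comment above, the current date and time are appended to the S3 prefix
    stamp = datetime.now(timezone.utc).strftime("%Y-%m-%d_%H%M%S")
    select = f"SELECT {src.get('columns', '*')} FROM {src['schemaName']}.{src['tableName']}"
    # a credentials clause (see the next sketch) would also be required
    unload_sql = f"UNLOAD ('{select}') TO '{s3['path']}{stamp}/' REGION '{s3['region']}'"
    print(unload_sql)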
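The "role wins over keys" rule described in s3Staging could look like the following. decrypt_value is a hypothetical stand-in: the real strings come from encryptValue.sh, and this sketch only base64-decodes them:

    import base64

    def decrypt_value(encoded: str) -> str:
        # hypothetical stand-in: values produced by encryptValue.sh would be
        # decrypted here; this sketch only reverses the base64 encoding
        return base64.b64decode(encoded).decode("utf-8")

    def credentials_clause(s3: dict) -> str:
        # if an IAM role is specified, the access key pair is ignored
        if s3.get("aws_iam_role"):
            return f"IAM_ROLE '{s3['aws_iam_role']}'"
        key = decrypt_value(s3["aws_access_key_id"])
        secret = decrypt_value(s3["aws_secret_access_key"])
        return f"CREDENTIALS 'aws_access_key_id={key};aws_secret_access_key={secret}'"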
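On the copyTarget side, again as a hypothetical sketch rather than the utility's actual code (and continuing from the names defined in the sketches above), the optional columns list and the EXPLICIT_IDS flag would land in the COPY statement roughly like this:

    tgt = config["copyTarget"]

    cols = f" ({tgt['columns']})" if tgt.get("columns") else ""
    copy_sql = (
        f"COPY {tgt['schemaName']}.{tgt['tableName']}{cols} "
        f"FROM '{s3['path']}{stamp}/' "  # the staging path the UNLOAD wrote to
        f"{credentials_clause(s3)} REGION '{s3['region']}'"
    )
    if tgt.get("explicit_ids"):
        copy_sql += " EXPLICIT_IDS"  # see the COPY documentation link above
    print(copy_sql)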