Amazon ElastiCache Redis Cross-Region Snapshot Copy For Disaster Recovery
To set up the Amazon ElastiCache for Redis cross-region snapshot copy, we need to follow the steps below.
- List all the clusters and get their available snapshots.
- Find the most recent snapshot for each cluster.
- Use the export-to-S3 option to export the Redis native backup file to S3.
- Enable cross-region replication from the source S3 bucket to the target bucket.
The export-to-S3 option may not be available in all regions by default, so we need to enable the bucket ACL and add the ElastiCache canonical user ID to enable this option.
Set up ACL #
- Go to the S3 bucket -> Permissions -> Object Ownership and click Edit.
- Select ACLs enabled.
- Set the object ownership to Bucket owner preferred.
- Then save the changes.
Now, under the ACL, edit these options (a scripted version of these steps is sketched after this list):
- Access for other AWS accounts - add the canonical ID 540804c33a284a299d2547575ce1010f2312ef3da9b3a053c8bc45bf233e4353
- Objects - List, Write
- Bucket ACL - Read, Write
- Then save the changes.
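Here is a minimal boto3 sketch of the two console steps above, assuming the exports land in the source bucket used later in this post. Note that put_bucket_acl replaces the existing ACL, so the owner's full-control grant is re-applied explicitly.

import boto3

s3 = boto3.client('s3')
bucket = 'bhuvi-dr-elastic-cache-source-backup'  # assumed export bucket
# Canonical user ID from the ACL step above
canonical_id = '540804c33a284a299d2547575ce1010f2312ef3da9b3a053c8bc45bf233e4353'

# Step 1: enable ACLs by setting object ownership to "Bucket owner preferred"
s3.put_bucket_ownership_controls(
    Bucket=bucket,
    OwnershipControls={'Rules': [{'ObjectOwnership': 'BucketOwnerPreferred'}]}
)

# Step 2: grant the canonical ID the same permissions as the console steps.
# put_bucket_acl overwrites the whole ACL, so keep the owner's grant too.
owner_id = s3.get_bucket_acl(Bucket=bucket)['Owner']['ID']
s3.put_bucket_acl(
    Bucket=bucket,
    GrantFullControl=f'id={owner_id}',
    GrantRead=f'id={canonical_id}',      # Objects: List
    GrantWrite=f'id={canonical_id}',     # Objects: Write
    GrantReadACP=f'id={canonical_id}',   # Bucket ACL: Read
    GrantWriteACP=f'id={canonical_id}',  # Bucket ACL: Write
)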
Bucket Policy: #
We set this up from the Mumbai region, so the service principal below references ap-south-1.
{
  "Version": "2012-10-17",
  "Id": "Policy15397346",
  "Statement": [
    {
      "Sid": "Stmt15399483",
      "Effect": "Allow",
      "Principal": {
        "Service": "elasticache.amazonaws.com"
      },
      "Action": "s3:*",
      "Resource": [
        "arn:aws:s3:::bhuvi-dr-elastic-cache-target-backup",
        "arn:aws:s3:::bhuvi-dr-elastic-cache-target-backup/*"
      ]
    },
    {
      "Sid": "Stmt15399484",
      "Effect": "Allow",
      "Principal": {
        "Service": "ap-south-1.elasticache-snapshot.amazonaws.com"
      },
      "Action": "s3:*",
      "Resource": [
        "arn:aws:s3:::bhuvi-dr-elastic-cache-target-backup",
        "arn:aws:s3:::bhuvi-dr-elastic-cache-target-backup/*"
      ]
    }
  ]
}
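If you prefer to apply the policy with code instead of the console, here is a minimal boto3 sketch; the policy file path is hypothetical and holds the JSON shown above.

import boto3

s3 = boto3.client('s3')

# Load the bucket policy shown above from a local file (hypothetical path)
with open('bucket-policy.json') as f:
    policy = f.read()

s3.put_bucket_policy(
    Bucket='bhuvi-dr-elastic-cache-target-backup',
    Policy=policy,
)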
IAM role for S3 replication: #
We are replicating from the Mumbai region to the Hyderabad region using S3 replication. This requires an IAM role to replicate the objects, so attach the following IAM policy to the IAM role that S3 will use for replication.
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetReplicationConfiguration",
        "s3:ListBucket"
      ],
      "Resource": [
        "arn:aws:s3:::bhuvi-dr-elastic-cache-source-backup"
      ]
    },
    {
      "Effect": "Allow",
      "Action": [
        "s3:GetObjectVersionForReplication",
        "s3:GetObjectVersionAcl",
        "s3:GetObjectVersionTagging"
      ],
      "Resource": [
        "arn:aws:s3:::bhuvi-dr-elastic-cache-source-backup/*"
      ]
    },
    {
      "Effect": "Allow",
      "Action": [
        "s3:ReplicateObject",
        "s3:ReplicateDelete",
        "s3:ReplicateTags"
      ],
      "Resource": "arn:aws:s3:::bhuvi-dr-elastic-cache-target-backup/*"
    }
  ]
}
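With the role and policy in place, the replication rule itself still has to be created on the source bucket. Here is a minimal boto3 sketch, assuming versioning is already enabled on both buckets (S3 replication requires it) and using a placeholder role ARN.

import boto3

s3 = boto3.client('s3')

# Versioning must already be enabled on both buckets for replication to work
s3.put_bucket_replication(
    Bucket='bhuvi-dr-elastic-cache-source-backup',
    ReplicationConfiguration={
        'Role': 'arn:aws:iam::ACCOUNT_ID:role/s3-replication-role',  # placeholder ARN
        'Rules': [
            {
                'ID': 'elasticache-backup-replication',
                'Priority': 1,
                'Status': 'Enabled',
                'Filter': {},  # replicate every object in the bucket
                'DeleteMarkerReplication': {'Status': 'Disabled'},
                'Destination': {
                    # The target bucket lives in the Hyderabad region
                    'Bucket': 'arn:aws:s3:::bhuvi-dr-elastic-cache-target-backup'
                },
            }
        ],
    },
)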
IAM role for ElastiCache Export to S3: #
Create an IAM role for the Lambda function and attach the following inline policy so it can export the snapshots to S3.
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "VisualEditor0",
      "Effect": "Allow",
      "Action": [
        "elasticache:CopySnapshot",
        "elasticache:DescribeSnapshots",
        "elasticache:DescribeCacheClusters"
      ],
      "Resource": [
        "arn:aws:elasticache:ap-south-1:ACCOUNT_ID:cluster:*",
        "arn:aws:elasticache:ap-south-1:ACCOUNT_ID:snapshot:*"
      ]
    },
    {
      "Sid": "VisualEditor1",
      "Effect": "Allow",
      "Action": [
        "s3:GetBucketLocation",
        "s3:PutObject",
        "s3:GetObject",
        "s3:DeleteObject",
        "s3:ListAllMyBuckets",
        "s3:ListBucket"
      ],
      "Resource": [
        "arn:aws:s3:::bhuvi-dr-elastic-cache-source-backup",
        "arn:aws:s3:::bhuvi-dr-elastic-cache-source-backup/*"
      ]
    },
    {
      "Sid": "VisualEditor2",
      "Effect": "Allow",
      "Action": [
        "s3:ListAllMyBuckets",
        "s3:ListBucket"
      ],
      "Resource": [
        "arn:aws:s3:::*"
      ]
    }
  ]
}
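The role also needs a trust policy that allows Lambda to assume it. A minimal boto3 sketch, with placeholder role and policy names, and the inline policy above stored in a hypothetical local file:

import json
import boto3

iam = boto3.client('iam')

# Trust policy so the Lambda service can assume this role
trust_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {"Service": "lambda.amazonaws.com"},
            "Action": "sts:AssumeRole",
        }
    ],
}

iam.create_role(
    RoleName='elasticache-s3-export-role',  # placeholder name
    AssumeRolePolicyDocument=json.dumps(trust_policy),
)

# Attach the inline policy shown above (hypothetical local file)
with open('elasticache-export-policy.json') as f:
    iam.put_role_policy(
        RoleName='elasticache-s3-export-role',
        PolicyName='elasticache-export-to-s3',
        PolicyDocument=f.read(),
    )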
Lambda function code: #
import boto3
from datetime import datetime

client = boto3.client('elasticache')


def lambda_handler(event, context):
    elasticache_clusters = get_elasticache_clusters()
    for cluster_id in elasticache_clusters:
        print('Processing cluster id: ' + cluster_id)
        latest_snapshot_name = get_latest_snapshot_name(cluster_id)
        if latest_snapshot_name:
            print(f"Latest Snapshot Name for cluster {cluster_id}: {latest_snapshot_name}")
            s3_bucket = 'bhuvi-dr-elastic-cache-source-backup'
            copy_snapshot_to_s3(cluster_id, latest_snapshot_name, s3_bucket)
            print('-----------------------------')
        else:
            print(f"No snapshots found for cluster ID: {cluster_id}")
            print('-----------------------------')


###################
## Sub functions ##
###################

def get_elasticache_clusters():
    # Describe ElastiCache clusters and extract the cache cluster IDs
    response = client.describe_cache_clusters()
    cluster_ids = [cluster['CacheClusterId'] for cluster in response.get('CacheClusters', [])]
    return cluster_ids


def get_latest_snapshot_name(cluster_id):
    # Describe snapshots for the specified cluster
    response = client.describe_snapshots(
        CacheClusterId=cluster_id
    )
    # Sort snapshots by the creation time of the first node snapshot
    # and get the latest snapshot name
    snapshots = response.get('Snapshots', [])
    sorted_snapshots = sorted(
        snapshots,
        key=lambda x: x.get('NodeSnapshots', [{}])[0].get('SnapshotCreateTime', ''),
        reverse=True
    )
    if sorted_snapshots:
        latest_snapshot_name = sorted_snapshots[0].get('SnapshotName', 'N/A')
        return latest_snapshot_name
    else:
        print(f"No snapshots found for cluster ID: {cluster_id}")
        return None


def copy_snapshot_to_s3(cluster_id, snapshot_name, s3_bucket):
    current_timestamp = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    current_date = datetime.now().strftime("%Y-%m-%d")
    # Strip the node suffix (for example "-001") so exports are grouped
    # under the cluster name in S3
    cluster_name = '-'.join(cluster_id.split('-')[:-1])
    # The target snapshot name doubles as the S3 key prefix:
    # <cluster>/<date>/<snapshot>-<timestamp>
    copied_snapshot_name = cluster_name + '/' + current_date + '/' + snapshot_name + '-' + current_timestamp
    # CopySnapshot with TargetBucket set exports the snapshot to S3
    response = client.copy_snapshot(
        SourceSnapshotName=snapshot_name,
        TargetSnapshotName=copied_snapshot_name,
        TargetBucket=s3_bucket
    )
    print(f"Copy Snapshot operation response: {response}")