=> Using the aws glacier CLI (vs aws s3). This is different from aws s3 with --storage-class GLACIER.
https://stackoverflow.com/questions/46111372/change-s3-bucket-storage-class-to-s3-infrequent-access
Back up the images folders to S3 for all sites,
once every other week (see the crontab sketch after the checklist below),
and keep only 6 months of backup data.
- permission to run aws => root or oanh
- estimate cost
- find images only
- test for missing files
- restore use case
- cron to remove old logs
- folder test
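Cron has no native "every other week" syntax; one workaround is a weekly entry gated on ISO week parity. A sketch for /etc/crontab (the script path is an assumption):
# 02:00 every Monday, but only in even ISO weeks (~every other week);
# % must be escaped as \% inside crontab
0 2 * * 1  root  [ $(( 10#$(date +\%V) % 2 )) -eq 0 ] && /usr/local/bin/backup_images.sh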
aws s3 cp /tmp/foo s3://bucket/ --recursive --exclude ".git/*"
aws s3 sync /tmp/foo s3://bucket/
aws s3 cp /tmp/foo/ s3://bucket/ --recursive --exclude "*" --include "*.jpg"
aws s3 cp /tmp/foo/ s3://bucket/ --recursive \
--exclude "*" --include "*.jpg" --include "*.png"
# Get available space in bytes (free blocks * block size)
echo $(($(stat -f --format="%a*%S" .)))
df -k .
rsync -avr --exclude='path1/to/exclude' --exclude='path2/to/exclude' source destination
# DEV
# Clone folder
# Clean log, .git
# clean other
# Zip
# add timestamp
# s3 upload
# done
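A minimal sketch of the DEV flow above (site name, paths, and bucket are assumptions):
SITE=example.com
TS=$(date +%Y_%m_%d)
tar \
    --exclude='.git' \
    --exclude='application/logs' \
    --exclude='system' \
    --exclude='vendor' \
    -czf /tmp/${SITE}_${TS}.tgz -C /var/www/vhosts/$SITE/httpdocs .
aws s3 cp /tmp/${SITE}_${TS}.tgz s3://dev-websites-backups/ --storage-class GLACIER
rm /tmp/${SITE}_${TS}.tgz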
aws glacier list-vaults --account-id 861481479208 --region us-east-1 --profile ub_vbox_1
# LIVE
# Local test
aws glacier list-vaults --account-id 861481479208 --region us-east-1 --profile ub_vbox_1
a.com
x.com
b.com
e.com
q.com
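The JSON below is what aws glacier initiate-multipart-upload returns; the initiating command looks roughly like this (the part size and description are assumptions, not in these notes):
aws glacier initiate-multipart-upload --account-id 861481479666 --vault-name Tasman_Glacier --archive-description "images backup" --part-size 1048576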
"location": "/861481479666/vaults/Tasman_Glacier/multipart-uploads/iE9Mlx81m8ZndqvS5bPRGL9xB0jwDkeuY9JL7T-4scpRcG_rBNFeS6HyrEoMKqEHGf1GE-a4C4_tZes2zJAaJvEUE-GW",
"uploadId": "iE9Mlx81m8ZndqvS5bPRGL9xB0jwDkeuY9JL7T-4scpRcG_rBNFeS6HyrEoMKqEHGf1GE-a4C4_tZes2zJAaJvEUE-GW"
UPLOADID="19gaRezEXAMPLES6Ry5YYdqthHOC_kGRCT03L9yetr220UmPtBYKk-OssZtLqyFu7sY1_lR7vgFuJV6NtcV5zpsJ"
1 MiB = 1048576 bytes.
API error: Part size must not be null, must be a power of two and be between 1048576 and 4294967296 bytes.
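Re-initiating with a valid power-of-two part size returns a new upload ID; the command was presumably something like this (8 MiB matches the 8388608-byte parts used below):
aws glacier initiate-multipart-upload --account-id 861481479208 --vault-name Tasman_Glacier --part-size 8388608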
{
"location": "/861481479208/vaults/Tasman_Glacier/multipart-uploads/V-q1C_lHaPbjdp3EfdkCx_SLvcq8uPSpDjDUXLDuuVTwBR3WLZNyyzi8hCQBDSui1azYAUn0PPQjzxIevD1Ntj9Wb6j5",
"uploadId": "V-q1C_lHaPbjdp3EfdkCx_SLvcq8uPSpDjDUXLDuuVTwBR3WLZNyyzi8hCQBDSui1azYAUn0PPQjzxIevD1Ntj9Wb6j5"
}
=> the upload ID changes after changing the part size:
UPLOADID="V-q1C_lHaPbjdp3EfdkCx_SLvcq8uPSpDjDUXLDuuVTwBR3WLZNyyzi8hCQBDSui1azYAUn0PPQjzxIevD1Ntj9Wb6j5"
aws glacier upload-multipart-part --upload-id $UPLOADID --body dfc_friction_product_Feb_11_sorted.csv.part_aa --range 'bytes 0-8388607/*' --account-id 861481479208 --vault-name Tasman_Glacier
{
"checksum": "6a13bcb793549a84661908a9a627caa6e3a83f15d6fbffbe24a140a947acea02"
}
aws glacier upload-multipart-part --upload-id $UPLOADID --body dfc_friction_product_Feb_11_sorted.csv.part_ab --range 'bytes 8388608-13738264/*' --account-id 861481479208 --vault-name Tasman_Glacier
expr 8388608 + 5349657
13738265 (matches the original file size; see ls output)
# NOTE: range ends are inclusive, so the last byte is 13738265 - 1 = 13738264
{
"checksum": "a4428ed101120115339e1510238274f6fa8fad4109a3726135038db2b6ce6f5a"
}
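Rather than computing each inclusive byte range by hand, a loop over the split parts works too (a sketch, reusing the file, vault, and $UPLOADID above):
FILE=dfc_friction_product_Feb_11_sorted.csv
OFFSET=0
for PART in ${FILE}.part_*; do
    SIZE=$(stat -c%s "$PART")
    END=$((OFFSET + SIZE - 1))
    aws glacier upload-multipart-part --upload-id "$UPLOADID" --body "$PART" \
        --range "bytes ${OFFSET}-${END}/*" --account-id 861481479208 --vault-name Tasman_Glacier
    OFFSET=$((OFFSET + SIZE))
done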
openssl dgst -sha256 hash12hash
SHA256(hash12hash)= f40037aa529c9b79f9dfaa491166547bd8078f9ad2e124d0cc2520ce43dc96ab
# (one hash level too deep; this value is unused. TREEHASH below is SHA256(hash12), computed further down)
TREEHASH=15859a177a2882b67ce54393795874a5eb06e03431b47e538839f9cfe6b1861f
aws glacier complete-multipart-upload --checksum $TREEHASH --archive-size 13738265 --upload-id $UPLOADID --account-id 861481479208 --vault-name Tasman_Glacier
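The uploaded parts can be double-checked before (or after) completing:
aws glacier list-parts --upload-id $UPLOADID --account-id 861481479208 --vault-name Tasman_Glacier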
# How the parts above were produced:
expr 1048576 \* 8
8388608
split --bytes=8388608 --verbose dfc_friction_product_Feb_11_sorted.csv dfc_friction_product_Feb_11_sorted.csv.part_
openssl dgst -sha256 -binary dfc_friction_product_Feb_11_sorted.csv.part_aa > hash1
openssl dgst -sha256 -binary dfc_friction_product_Feb_11_sorted.csv.part_ab > hash2
# THIS PART may be complex
cat hash1 hash2 > hash12
openssl dgst -sha256 -binary hash12 > hash12hash
openssl dgst -sha256 hash12
SHA256(hash12)= 15859a177a2882b67ce54393795874a5eb06e03431b47e538839f9cfe6b1861f
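Note that the Glacier tree hash is defined over 1 MiB chunks of the whole archive, so for multi-MiB parts the flat per-part hashes above may not match what the API expects. A generic tree-hash sketch (pairwise combine digests level by level; an odd leftover is promoted unchanged):
FILE=dfc_friction_product_Feb_11_sorted.csv
split --bytes=1048576 "$FILE" /tmp/th_chunk_
HASHES=()
for C in /tmp/th_chunk_*; do
    HASHES+=($(openssl dgst -sha256 "$C" | awk '{print $2}'))
done
while [ ${#HASHES[@]} -gt 1 ]; do
    NEXT=()
    for ((i=0; i<${#HASHES[@]}; i+=2)); do
        if [ $((i+1)) -lt ${#HASHES[@]} ]; then
            # concatenate the two binary digests and hash again
            NEXT+=($(echo -n "${HASHES[i]}${HASHES[i+1]}" | xxd -r -p | openssl dgst -sha256 | awk '{print $2}'))
        else
            NEXT+=("${HASHES[i]}")
        fi
    done
    HASHES=("${NEXT[@]}")
done
echo "TREEHASH=${HASHES[0]}"
rm /tmp/th_chunk_*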
# Using S3 but with the Glacier storage class.
Be sure you understand all of the implications of STANDARD_IA before using it.
Every object has a minimum billable size of 128 KB and a minimum billable duration of 30 days.
If an object is smaller or is deleted sooner, you are still charged those minimums. Lifecycle policies are often the preferred solution.
We keep backups for at most 6 months => not a problem.
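A lifecycle rule expiring objects after ~6 months could look like this (the bucket name and the 180-day cutoff are assumptions):
cat > lifecycle.json <<'EOF'
{
    "Rules": [
        {
            "ID": "expire-backups-after-6-months",
            "Filter": {"Prefix": ""},
            "Status": "Enabled",
            "Expiration": {"Days": 180}
        }
    ]
}
EOF
aws s3api put-bucket-lifecycle-configuration --bucket dev-websites-backups --lifecycle-configuration file://lifecycle.json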
aws s3 cp help
--storage-class (string) The type of storage to use for the object.
Valid choices are: STANDARD | REDUCED_REDUNDANCY | STANDARD_IA |
ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE.
Defaults to 'STANDARD'.
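So a direct-to-Glacier upload is just (bucket name assumed):
aws s3 cp backup.tgz s3://dev-websites-backups/ --storage-class GLACIER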
# Create bucket
# Bucket names cannot contain underscores; use hyphens instead
aws s3 mb s3://dev-websites-backups --region us-east-1
# Package R1 (archive name only; the full command is below)
tar -czf r1concepts.com_2021_04_22.tgz
# -z: actually gzip-compress, so the .tgz name is accurate
# -C means cd to the given directory first
# -p, --preserve-permissions, --same-permissions: keep information about file permissions (default for the superuser)
# Excludes must be relative: after -C, tar sees ./-prefixed names,
# so absolute /var/www/... exclude paths never match
tar -czf brakelabs.com-2021-04-22.tgz -C /var/www/vhosts/.com/httpdocs . \
    --exclude='.git' \
    --exclude='application/logs' \
    --exclude='system' \
    --exclude='vendor'
tar \
--exclude='.git' \
--exclude='application' \
--exclude='application/logs' \
--exclude='admin' \
--exclude='blog' \
--exclude='import_file' \
--exclude='script' \
--exclude='system' \
--exclude='vendor' \
-czvf test.tgz -C /var/www/-dev .
tar \
--exclude='.git' \
--exclude='application/logs' \
--exclude='system' \
--exclude='vendor' \
-czvf test.tgz -C /var/www/-dev .
# Drop -v to not list each file as it is added: -c[v]f
tar \
--exclude='.git' \
--exclude='application/logs' \
--exclude='system' \
--exclude='vendor' \
-czf /var/www/html/test.tgz -C /var/www/-dev .
# Live
# Remove -v to not list each file as it is added: -c[v]f
tar \
--exclude='.git' \
--exclude='application/logs' \
--exclude='system' \
--exclude='vendor' \
-czf /var/www/vhosts/backup/.com-2021-04-22.tgz -C /var/www/vhosts/.com/httpdocs .
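The last step of the LIVE flow would then be the S3 upload of that archive (bucket and storage class are assumptions):
aws s3 cp /var/www/vhosts/backup/.com-2021-04-22.tgz s3://dev-websites-backups/ --storage-class GLACIER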