- Configuration complète Stripe pour les 3 environnements (DEV/REC/PROD) * DEV: Clés TEST Pierre (mode test) * REC: Clés TEST Client (mode test) * PROD: Clés LIVE Client (mode live) - Ajout de la gestion des bases de données immeubles/bâtiments * Configuration buildings_database pour DEV/REC/PROD * Service BuildingService pour enrichissement des adresses - Optimisations pages et améliorations ergonomie - Mises à jour des dépendances Composer - Nettoyage des fichiers obsolètes 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
652 lines
27 KiB
Bash
652 lines
27 KiB
Bash
#!/bin/bash
#
# d6back - pull-style backup of remote incus containers driven by d6back.yaml.
# Streams directory tarballs and encrypted MariaDB dumps from each container
# to this host, applies a retention policy, and mails a recap report.
#
# Usage: d6back.sh [-onlydb]

set -uo pipefail
# Note: Removed -e to allow script to continue on errors
# Errors are handled explicitly with ERROR_COUNT

# Parse command line arguments
# -onlydb restricts the run to the databases listed under each container's
# "onlydb" key in the YAML; directory backups are skipped entirely.
ONLY_DB=false
if [[ "${1:-}" == "-onlydb" ]]; then
    ONLY_DB=true
    echo "Mode: Database backup only"
fi

# Configuration - config file and logs live next to the script itself.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CONFIG_FILE="$SCRIPT_DIR/d6back.yaml"
LOG_DIR="$SCRIPT_DIR/logs"
mkdir -p "$LOG_DIR"
LOG_FILE="$LOG_DIR/d6back-$(date +%Y%m%d).log"  # one log per day; every run appends
ERROR_COUNT=0
# NOTE(review): predictable /tmp name (PID only) - acceptable on a dedicated
# backup host, but mktemp would be safer on a shared machine.
RECAP_FILE="/tmp/backup_recap_$$.txt"

# Lock file to prevent concurrent executions.
# fd 200 stays open for the whole script lifetime; flock -n fails immediately
# instead of queueing behind an already-running backup.
LOCK_FILE="/var/lock/d6back.lock"
exec 200>"$LOCK_FILE"
if ! flock -n 200; then
    echo "ERROR: Another backup is already running" >&2
    exit 1
fi
trap 'flock -u 200' EXIT

# Clean old log files (keep only last 10).
# Log names embed YYYYMMDD, so a lexical reverse sort is newest-first.
find "$LOG_DIR" -maxdepth 1 -name "d6back-*.log" -type f 2>/dev/null | sort -r | tail -n +11 | xargs -r rm -f || true
|
|
|
|
# Check dependencies - COMMENTED OUT
|
|
# for cmd in yq ssh tar openssl; do
|
|
# if ! command -v "$cmd" &> /dev/null; then
|
|
# echo "ERROR: $cmd is required but not installed" | tee -a "$LOG_FILE"
|
|
# exit 1
|
|
# fi
|
|
# done
|
|
|
|
# Load config
# NOTE: yq prints the literal string "null" for keys absent from the YAML, so
# a missing mandatory key would previously flow through unnoticed (e.g. backups
# written under a directory literally named "null"). The validation loop below
# turns that into a hard failure at startup instead.
DIR_BACKUP=$(yq '.global.dir_backup' "$CONFIG_FILE" | tr -d '"')
ENC_KEY_PATH=$(yq '.global.enc_key' "$CONFIG_FILE" | tr -d '"')
BACKUP_SERVER=$(yq '.global.backup_server // "BACKUP"' "$CONFIG_FILE" | tr -d '"')
EMAIL_TO=$(yq '.global.email_to // "support@unikoffice.com"' "$CONFIG_FILE" | tr -d '"')
KEEP_DIRS=$(yq '.global.keep_dirs' "$CONFIG_FILE" | tr -d '"')
KEEP_DB=$(yq '.global.keep_db' "$CONFIG_FILE" | tr -d '"')

# Fail fast on missing mandatory settings (these have no '//' default above).
for required_var in DIR_BACKUP ENC_KEY_PATH KEEP_DIRS KEEP_DB; do
    if [[ -z "${!required_var}" || "${!required_var}" == "null" ]]; then
        echo "ERROR: Missing config value '$required_var' in $CONFIG_FILE" | tee -a "$LOG_FILE"
        exit 1
    fi
done

# Load encryption key.
# $(cat ...) strips trailing newlines, which is what openssl -pass pass:... expects.
if [[ ! -f "$ENC_KEY_PATH" ]]; then
    echo "ERROR: Encryption key not found: $ENC_KEY_PATH" | tee -a "$LOG_FILE"
    exit 1
fi
ENC_KEY=$(cat "$ENC_KEY_PATH")
|
|
|
|
echo "=== Backup Started $(date) ===" | tee -a "$LOG_FILE"
echo "Backup directory: $DIR_BACKUP" | tee -a "$LOG_FILE"

# Check available disk space.
# df -P forces one output line per filesystem (a long device name would
# otherwise wrap onto two lines and break `tail -1 | awk '{print $5}'`).
# If df fails entirely (e.g. $DIR_BACKUP not mounted) we fall back to 100%
# usage so the warning path fires instead of crashing the arithmetic below
# on an empty string.
DISK_USAGE=$(df -P "$DIR_BACKUP" 2>/dev/null | awk 'NR==2 { sub(/%/, "", $5); print $5 }')
DISK_USAGE=${DISK_USAGE:-100}
DISK_FREE=$((100 - DISK_USAGE))

if [[ $DISK_FREE -lt 20 ]]; then
    echo "WARNING: Low disk space! Only ${DISK_FREE}% free on backup partition" | tee -a "$LOG_FILE"

    # Send warning email (best effort - the backup continues either way).
    echo "Sending DISK SPACE WARNING email to $EMAIL_TO (${DISK_FREE}% free)" | tee -a "$LOG_FILE"
    if command -v msmtp &> /dev/null; then
        {
            echo "To: $EMAIL_TO"
            echo "Subject: Backup${BACKUP_SERVER} WARNING - Low disk space (${DISK_FREE}% free)"
            echo ""
            echo "WARNING: Low disk space on $(hostname)"
            echo ""
            echo "Backup directory: $DIR_BACKUP"
            echo "Disk usage: ${DISK_USAGE}%"
            echo "Free space: ${DISK_FREE}%"
            echo ""
            echo "The backup will continue but please free up some space soon."
            echo ""
            echo "Date: $(date '+%d.%m.%Y %H:%M')"
        } | msmtp "$EMAIL_TO"
        echo "DISK SPACE WARNING email sent successfully to $EMAIL_TO" | tee -a "$LOG_FILE"
    else
        echo "WARNING: msmtp not found - DISK WARNING email NOT sent" | tee -a "$LOG_FILE"
    fi
else
    echo "Disk space OK: ${DISK_FREE}% free" | tee -a "$LOG_FILE"
fi
|
|
|
|
# Initialize recap file: write the report header in one grouped redirect,
# truncating any leftover content from a previous run with the same PID.
{
    echo "BACKUP REPORT - $(hostname) - $(date '+%d.%m.%Y %H')h"
    echo "========================================"
    echo ""
} > "$RECAP_FILE"
|
|
|
|
# Function to format size in MB with thousand separator
# Prints a file's on-disk size in whole megabytes, with '.' between thousand
# groups (e.g. 1234567 -> "1.234.567"). A missing file prints "0".
format_size_mb() {
    local path="$1"
    if [[ ! -f "$path" ]]; then
        echo "0"
        return
    fi
    local kb
    kb=$(du -k "$path" | cut -f1)
    local grouped=$(( kb / 1024 ))
    # Insert a dot before each trailing run of three digits, right to left,
    # until fewer than four plain digits remain at the front.
    while [[ $grouped =~ ^([0-9]+)([0-9]{3})((\.[0-9]{3})*)$ ]]; do
        grouped="${BASH_REMATCH[1]}.${BASH_REMATCH[2]}${BASH_REMATCH[3]}"
    done
    printf '%s' "$grouped"
}
|
|
|
|
# Function to calculate age in days
# Prints the age of a file in whole days (mtime vs. now). An unreadable or
# missing file counts as epoch (mtime 0), i.e. a very large age.
get_age_days() {
    local path="$1"
    local mtime
    mtime=$(stat -c %Y "$path" 2>/dev/null) || mtime=0
    echo $(( ( $(date +%s) - mtime ) / 86400 ))
}
|
|
|
|
# Function to get week number of year for a file
# Prints "YYYY-WW" (Monday-based week number, %W) of the file's mtime.
# An unreadable or missing file falls back to the epoch.
get_week_year() {
    local path="$1"
    local ts
    ts=$(stat -c %Y "$path" 2>/dev/null) || ts=0
    date -d "@${ts}" '+%Y-%W'
}
|
|
|
|
# Function to cleanup old backups according to retention policy
#
# cleanup_old_backups
#   Walks $DIR_BACKUP/<host>/ and applies two retention policies:
#     - directory tarballs (*.tar.gz): keep everything younger than keep_dirs;
#     - encrypted SQL dumps (*.sql.gz.enc): tiered "Nd,Nw,Nm" policy from
#       keep_db - keep all within N days, then one per day up to N weeks,
#       then one per week up to N months (months approximated as 30 days).
#   Because files are visited newest-first, the survivor of each day/week
#   bucket is always the most recent dump in that bucket.
#
# Globals read:    DIR_BACKUP, KEEP_DIRS, KEEP_DB, LOG_FILE, RECAP_FILE
# Globals written: appends a summary to $RECAP_FILE.
# NOTE(review): KEEP_DB_DAILY/WEEKLY/MONTHLY from `read` are not declared
# local, so they leak into the global scope - harmless today, but confirm
# nothing else reuses those names.
cleanup_old_backups() {
    local DELETED_COUNT=0
    local KEPT_COUNT=0

    echo "" | tee -a "$LOG_FILE"
    echo "=== Starting Backup Retention Cleanup ===" | tee -a "$LOG_FILE"

    # Parse retention periods
    local KEEP_DIRS_DAYS=${KEEP_DIRS%d} # Remove 'd' suffix

    # Parse database retention (5d,3w,15m)
    IFS=',' read -r KEEP_DB_DAILY KEEP_DB_WEEKLY KEEP_DB_MONTHLY <<< "$KEEP_DB"
    local KEEP_DB_DAILY_DAYS=${KEEP_DB_DAILY%d}
    local KEEP_DB_WEEKLY_WEEKS=${KEEP_DB_WEEKLY%w}
    local KEEP_DB_MONTHLY_MONTHS=${KEEP_DB_MONTHLY%m}

    # Convert to days
    local KEEP_DB_WEEKLY_DAYS=$((KEEP_DB_WEEKLY_WEEKS * 7))
    local KEEP_DB_MONTHLY_DAYS=$((KEEP_DB_MONTHLY_MONTHS * 30))

    echo "Retention policy: dirs=${KEEP_DIRS_DAYS}d, db=${KEEP_DB_DAILY_DAYS}d/${KEEP_DB_WEEKLY_WEEKS}w/${KEEP_DB_MONTHLY_MONTHS}m" | tee -a "$LOG_FILE"

    # Process each host directory
    for host_dir in "$DIR_BACKUP"/*; do
        if [[ ! -d "$host_dir" ]]; then
            continue
        fi

        local host_name=$(basename "$host_dir")
        echo " Cleaning host: $host_name" | tee -a "$LOG_FILE"

        # Clean directory backups (*.tar.gz but not *.sql.gz.enc)
        # Simple age cutoff: anything older than keep_dirs is removed.
        while IFS= read -r -d '' file; do
            if [[ $(basename "$file") == *".sql.gz.enc" ]]; then
                continue # Skip SQL files
            fi

            local age_days=$(get_age_days "$file")

            if [[ $age_days -gt $KEEP_DIRS_DAYS ]]; then
                rm -f "$file"
                echo " Deleted: $(basename "$file") (${age_days}d > ${KEEP_DIRS_DAYS}d)" | tee -a "$LOG_FILE"
                ((DELETED_COUNT++))
            else
                ((KEPT_COUNT++))
            fi
        done < <(find "$host_dir" -name "*.tar.gz" -type f -print0 2>/dev/null)

        # Clean database backups with retention policy
        # Group dump files per database name (the part before the first '_').
        declare -A db_files

        while IFS= read -r -d '' file; do
            local filename=$(basename "$file")
            local db_name=${filename%%_*}

            if [[ -z "${db_files[$db_name]:-}" ]]; then
                db_files[$db_name]="$file"
            else
                # Newline-join additional paths for the same database.
                db_files[$db_name]+=$'\n'"$file"
            fi
        done < <(find "$host_dir" -name "*.sql.gz.enc" -type f -print0 2>/dev/null)

        # Process each database
        for db_name in "${!db_files[@]}"; do
            # Sort files by age (newest first)
            # NOTE(review): the xargs/stat round-trip assumes no whitespace in
            # backup paths - true for the names this script generates.
            mapfile -t files < <(echo "${db_files[$db_name]}" | while IFS= read -r f; do
                echo "$f"
            done | xargs -I {} stat -c "%Y {}" {} 2>/dev/null | sort -rn | cut -d' ' -f2-)

            # Track which files to keep (one per calendar day / per ISO-ish week)
            declare -A keep_daily
            declare -A keep_weekly

            for file in "${files[@]}"; do
                local age_days=$(get_age_days "$file")

                if [[ $age_days -le $KEEP_DB_DAILY_DAYS ]]; then
                    # Keep all files within daily retention
                    ((KEPT_COUNT++))

                elif [[ $age_days -le $KEEP_DB_WEEKLY_DAYS ]]; then
                    # Weekly retention: keep one per day
                    # (newest-first iteration means the day's newest dump wins)
                    local file_date=$(date -d "@$(stat -c %Y "$file")" +"%Y-%m-%d")

                    if [[ -z "${keep_daily[$file_date]:-}" ]]; then
                        keep_daily[$file_date]="$file"
                        ((KEPT_COUNT++))
                    else
                        rm -f "$file"
                        ((DELETED_COUNT++))
                    fi

                elif [[ $age_days -le $KEEP_DB_MONTHLY_DAYS ]]; then
                    # Monthly retention: keep one per week
                    local week_year=$(get_week_year "$file")

                    if [[ -z "${keep_weekly[$week_year]:-}" ]]; then
                        keep_weekly[$week_year]="$file"
                        ((KEPT_COUNT++))
                    else
                        rm -f "$file"
                        ((DELETED_COUNT++))
                    fi

                else
                    # Beyond retention period
                    rm -f "$file"
                    echo " Deleted: $(basename "$file") (${age_days}d > ${KEEP_DB_MONTHLY_DAYS}d)" | tee -a "$LOG_FILE"
                    ((DELETED_COUNT++))
                fi
            done

            # Reset per-database bucket maps before the next database.
            unset keep_daily keep_weekly
        done

        unset db_files
    done

    echo "Cleanup completed: ${DELETED_COUNT} deleted, ${KEPT_COUNT} kept" | tee -a "$LOG_FILE"

    # Add cleanup summary to recap file
    echo "" >> "$RECAP_FILE"
    echo "CLEANUP SUMMARY:" >> "$RECAP_FILE"
    echo " Files deleted: $DELETED_COUNT" >> "$RECAP_FILE"
    echo " Files kept: $KEPT_COUNT" >> "$RECAP_FILE"
}
|
|
|
|
# Function to backup a single database (must be defined before use)
#
# backup_database <database>
#   Dumps one MariaDB database from inside the current container, rewrites the
#   CREATE DATABASE / USE statements to "<db>_<timestamp>" (so a restore lands
#   in a fresh database instead of clobbering the live one), gzips remotely,
#   and encrypts locally with AES-256-CBC (PBKDF2, key from $ENC_KEY).
#
# Relies on caller-scope variables set by the main host/container loops:
#   backup_dir, ssh_user, ssh_key, ssh_port, host_ip, host_name,
#   container_name, db_user, db_pass, db_host
# Globals written: ERROR_COUNT (incremented on any failure).
# Output: $backup_dir/sql/<db>_<YYYYmmdd_HH>.sql.gz.enc
backup_database() {
    local database="$1"
    local timestamp="$(date +%Y%m%d_%H)"
    local backup_file="$backup_dir/sql/${database}_${timestamp}.sql.gz.enc"

    echo " Backing up database: $database" | tee -a "$LOG_FILE"

    if [[ "$ssh_user" != "root" ]]; then
        CMD_PREFIX="sudo"
    else
        CMD_PREFIX=""
    fi

    # Execute backup with encryption
    # First test MySQL connection to get clear error messages (|| true to continue on error)
    # Credentials travel via a mode-600 defaults file written inside the
    # container, so the password never appears on a remote command line.
    # The here-doc lines below are part of the remote script string and must
    # stay at column 0 so the remote EOF delimiter is recognised.
    MYSQL_TEST=$(ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 "$ssh_user@$host_ip" \
"$CMD_PREFIX incus exec $container_name -- bash -c 'cat > /tmp/d6back.cnf << EOF
[client]
user=$db_user
password=$db_pass
host=$db_host
EOF
chmod 600 /tmp/d6back.cnf
mariadb --defaults-extra-file=/tmp/d6back.cnf -e \"SELECT 1\" 2>&1
rm -f /tmp/d6back.cnf'" 2>/dev/null || true)

    # Dump + sed rename + gzip all run remotely; openssl encrypts the stream
    # on this host as it arrives.
    if ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 "$ssh_user@$host_ip" \
"$CMD_PREFIX incus exec $container_name -- bash -c 'cat > /tmp/d6back.cnf << EOF
[client]
user=$db_user
password=$db_pass
host=$db_host
EOF
chmod 600 /tmp/d6back.cnf
mariadb-dump --defaults-extra-file=/tmp/d6back.cnf --single-transaction --lock-tables=false --add-drop-table --create-options --databases $database 2>/dev/null | sed -e \"/^CREATE DATABASE/s/\\\`$database\\\`/\\\`${database}_${timestamp}\\\`/\" -e \"/^USE/s/\\\`$database\\\`/\\\`${database}_${timestamp}\\\`/\" | gzip
rm -f /tmp/d6back.cnf'" | \
        openssl enc -aes-256-cbc -salt -pass pass:"$ENC_KEY" -pbkdf2 > "$backup_file" 2>/dev/null; then

        # Validate backup file size (encrypted SQL should be > 100 bytes)
        # The pipeline "succeeds" even when the dump is empty, so size is the
        # real failure signal here.
        if [[ -f "$backup_file" ]]; then
            file_size=$(stat -c%s "$backup_file" 2>/dev/null || echo 0)
            if [[ $file_size -lt 100 ]]; then
                # Analyze MySQL connection test results
                if [[ "$MYSQL_TEST" == *"Access denied"* ]]; then
                    echo " ERROR: MySQL authentication failed for $database on $host_name/$container_name" | tee -a "$LOG_FILE"
                    echo " User: $db_user@$db_host - Check password in configuration" | tee -a "$LOG_FILE"
                elif [[ "$MYSQL_TEST" == *"Unknown database"* ]]; then
                    echo " ERROR: Database '$database' does not exist on $host_name/$container_name" | tee -a "$LOG_FILE"
                elif [[ "$MYSQL_TEST" == *"Can't connect"* ]]; then
                    echo " ERROR: Cannot connect to MySQL server at $db_host in $container_name" | tee -a "$LOG_FILE"
                else
                    echo " ERROR: Backup file too small (${file_size} bytes): $database on $host_name/$container_name" | tee -a "$LOG_FILE"
                fi

                ((ERROR_COUNT++))
                rm -f "$backup_file"
            else
                size=$(du -h "$backup_file" | cut -f1)
                size_mb=$(format_size_mb "$backup_file")
                echo " ✓ Saved (encrypted): $(basename "$backup_file") ($size)" | tee -a "$LOG_FILE"
                echo " SQL: $(basename "$backup_file") - ${size_mb} Mo" >> "$RECAP_FILE"

                # Test backup integrity
                # Decrypt + gunzip -t proves the file round-trips; the SQL
                # content itself is not inspected.
                if ! openssl enc -aes-256-cbc -d -pass pass:"$ENC_KEY" -pbkdf2 -in "$backup_file" | gunzip -t 2>/dev/null; then
                    echo " ERROR: Backup integrity check failed for $database" | tee -a "$LOG_FILE"
                    ((ERROR_COUNT++))
                fi
            fi
        else
            echo " ERROR: Backup file not created: $database" | tee -a "$LOG_FILE"
            ((ERROR_COUNT++))
        fi
    else
        # Analyze MySQL connection test for failed backup
        if [[ "$MYSQL_TEST" == *"Access denied"* ]]; then
            echo " ERROR: MySQL authentication failed for $database on $host_name/$container_name" | tee -a "$LOG_FILE"
            echo " User: $db_user@$db_host - Check password in configuration" | tee -a "$LOG_FILE"
        elif [[ "$MYSQL_TEST" == *"Unknown database"* ]]; then
            echo " ERROR: Database '$database' does not exist on $host_name/$container_name" | tee -a "$LOG_FILE"
        elif [[ "$MYSQL_TEST" == *"Can't connect"* ]]; then
            echo " ERROR: Cannot connect to MySQL server at $db_host in $container_name" | tee -a "$LOG_FILE"
        else
            echo " ERROR: Failed to backup database $database on $host_name/$container_name" | tee -a "$LOG_FILE"
        fi

        ((ERROR_COUNT++))
        rm -f "$backup_file"
    fi
}
|
|
|
|
# Process each host
# Each .hosts[] entry carries SSH parameters plus a list of incus containers.
# Per container: stream out configured directories as tar.gz (unless -onlydb),
# then dump configured databases via backup_database(). Variables set here
# (host_ip, ssh_*, container_name, backup_dir, db_*) are read by
# backup_database() through caller scope.
host_count=$(yq '.hosts | length' "$CONFIG_FILE")

for ((i=0; i<$host_count; i++)); do
    host_name=$(yq ".hosts[$i].name" "$CONFIG_FILE" | tr -d '"')
    host_ip=$(yq ".hosts[$i].ip" "$CONFIG_FILE" | tr -d '"')
    ssh_user=$(yq ".hosts[$i].user" "$CONFIG_FILE" | tr -d '"')
    ssh_key=$(yq ".hosts[$i].key" "$CONFIG_FILE" | tr -d '"')
    ssh_port=$(yq ".hosts[$i].port // 22" "$CONFIG_FILE" | tr -d '"')

    echo "Processing host: $host_name ($host_ip)" | tee -a "$LOG_FILE"
    echo "" >> "$RECAP_FILE"
    echo "HOST: $host_name ($host_ip)" >> "$RECAP_FILE"
    echo "----------------------------" >> "$RECAP_FILE"

    # Test SSH connection
    # A dead host is logged and counted once, then skipped entirely.
    if ! ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 -o StrictHostKeyChecking=no "$ssh_user@$host_ip" "true" 2>/dev/null; then
        echo " ERROR: Cannot connect to $host_name ($host_ip)" | tee -a "$LOG_FILE"
        ((ERROR_COUNT++))
        continue
    fi

    # Process containers
    container_count=$(yq ".hosts[$i].containers | length" "$CONFIG_FILE" 2>/dev/null || echo "0")

    for ((c=0; c<$container_count; c++)); do
        container_name=$(yq ".hosts[$i].containers[$c].name" "$CONFIG_FILE" | tr -d '"')

        echo " Processing container: $container_name" | tee -a "$LOG_FILE"

        # Add container to recap
        echo "" >> "$RECAP_FILE"
        echo " Container: $container_name" >> "$RECAP_FILE"

        # Create backup directories
        backup_dir="$DIR_BACKUP/$host_name/$container_name"  # also read by backup_database()
        mkdir -p "$backup_dir"
        mkdir -p "$backup_dir/sql"

        # Backup directories (skip if -onlydb mode)
        if [[ "$ONLY_DB" == "false" ]]; then
            dir_count=$(yq ".hosts[$i].containers[$c].dirs | length" "$CONFIG_FILE" 2>/dev/null || echo "0")

            for ((d=0; d<$dir_count; d++)); do
                # sed strips surrounding quotes that yq may emit around strings.
                dir_path=$(yq ".hosts[$i].containers[$c].dirs[$d]" "$CONFIG_FILE" | sed 's/^"\|"$//g')

                # Use sudo if not root
                if [[ "$ssh_user" != "root" ]]; then
                    CMD_PREFIX="sudo"
                else
                    CMD_PREFIX=""
                fi

                # Special handling for /var/www - backup each subdirectory separately
                if [[ "$dir_path" == "/var/www" ]]; then
                    echo " Backing up subdirectories of $dir_path" | tee -a "$LOG_FILE"

                    # Get list of subdirectories
                    subdirs=$(ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 "$ssh_user@$host_ip" \
                        "$CMD_PREFIX incus exec $container_name -- find /var/www -maxdepth 1 -type d ! -path /var/www" 2>/dev/null || echo "")

                    # NOTE(review): word-splitting $subdirs assumes /var/www
                    # subdirectory names contain no whitespace.
                    for subdir in $subdirs; do
                        subdir_name=$(basename "$subdir" | tr '/' '_')
                        backup_file="$backup_dir/www_${subdir_name}_$(date +%Y%m%d_%H).tar.gz"

                        echo " Backing up: $subdir" | tee -a "$LOG_FILE"

                        # tar runs inside the container and streams to stdout;
                        # the archive is written directly on the backup host.
                        if ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 "$ssh_user@$host_ip" \
                            "$CMD_PREFIX incus exec $container_name -- tar czf - $subdir 2>/dev/null" > "$backup_file"; then

                            # Validate backup file size (tar.gz should be > 1KB)
                            if [[ -f "$backup_file" ]]; then
                                file_size=$(stat -c%s "$backup_file" 2>/dev/null || echo 0)
                                if [[ $file_size -lt 1024 ]]; then
                                    echo " WARNING: Backup file very small (${file_size} bytes): $subdir" | tee -a "$LOG_FILE"
                                    # Keep the file but note it's small
                                    size=$(du -h "$backup_file" | cut -f1)
                                    size_mb=$(format_size_mb "$backup_file")
                                    echo " ✓ Saved (small): $(basename "$backup_file") ($size)" | tee -a "$LOG_FILE"
                                    echo " DIR: $(basename "$backup_file") - ${size_mb} Mo (WARNING: small)" >> "$RECAP_FILE"
                                else
                                    size=$(du -h "$backup_file" | cut -f1)
                                    size_mb=$(format_size_mb "$backup_file")
                                    echo " ✓ Saved: $(basename "$backup_file") ($size)" | tee -a "$LOG_FILE"
                                    echo " DIR: $(basename "$backup_file") - ${size_mb} Mo" >> "$RECAP_FILE"
                                fi

                                # Test tar integrity
                                if ! tar tzf "$backup_file" >/dev/null 2>&1; then
                                    echo " ERROR: Tar integrity check failed" | tee -a "$LOG_FILE"
                                    ((ERROR_COUNT++))
                                fi
                            else
                                echo " ERROR: Backup file not created: $subdir" | tee -a "$LOG_FILE"
                                ((ERROR_COUNT++))
                            fi
                        else
                            echo " ERROR: Failed to backup $subdir" | tee -a "$LOG_FILE"
                            ((ERROR_COUNT++))
                            rm -f "$backup_file"
                        fi
                    done
                else
                    # Normal backup for other directories
                    dir_name=$(basename "$dir_path" | tr '/' '_')
                    backup_file="$backup_dir/${dir_name}_$(date +%Y%m%d_%H).tar.gz"

                    echo " Backing up: $dir_path" | tee -a "$LOG_FILE"

                    if ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 "$ssh_user@$host_ip" \
                        "$CMD_PREFIX incus exec $container_name -- tar czf - $dir_path 2>/dev/null" > "$backup_file"; then

                        # Validate backup file size (tar.gz should be > 1KB)
                        if [[ -f "$backup_file" ]]; then
                            file_size=$(stat -c%s "$backup_file" 2>/dev/null || echo 0)
                            if [[ $file_size -lt 1024 ]]; then
                                echo " WARNING: Backup file very small (${file_size} bytes): $dir_path" | tee -a "$LOG_FILE"
                                # Keep the file but note it's small
                                size=$(du -h "$backup_file" | cut -f1)
                                size_mb=$(format_size_mb "$backup_file")
                                echo " ✓ Saved (small): $(basename "$backup_file") ($size)" | tee -a "$LOG_FILE"
                                echo " DIR: $(basename "$backup_file") - ${size_mb} Mo (WARNING: small)" >> "$RECAP_FILE"
                            else
                                size=$(du -h "$backup_file" | cut -f1)
                                size_mb=$(format_size_mb "$backup_file")
                                echo " ✓ Saved: $(basename "$backup_file") ($size)" | tee -a "$LOG_FILE"
                                echo " DIR: $(basename "$backup_file") - ${size_mb} Mo" >> "$RECAP_FILE"
                            fi

                            # Test tar integrity
                            if ! tar tzf "$backup_file" >/dev/null 2>&1; then
                                echo " ERROR: Tar integrity check failed" | tee -a "$LOG_FILE"
                                ((ERROR_COUNT++))
                            fi
                        else
                            echo " ERROR: Backup file not created: $dir_path" | tee -a "$LOG_FILE"
                            ((ERROR_COUNT++))
                        fi
                    else
                        echo " ERROR: Failed to backup $dir_path" | tee -a "$LOG_FILE"
                        ((ERROR_COUNT++))
                        rm -f "$backup_file"
                    fi
                fi
            done
        fi # End of directory backup section

        # Backup databases
        db_user=$(yq ".hosts[$i].containers[$c].db_user" "$CONFIG_FILE" 2>/dev/null | tr -d '"')
        db_pass=$(yq ".hosts[$i].containers[$c].db_pass" "$CONFIG_FILE" 2>/dev/null | tr -d '"')
        db_host=$(yq ".hosts[$i].containers[$c].db_host // \"localhost\"" "$CONFIG_FILE" 2>/dev/null | tr -d '"')

        # Check if we're in onlydb mode
        if [[ "$ONLY_DB" == "true" ]]; then
            # Use onlydb list if it exists
            onlydb_count=$(yq ".hosts[$i].containers[$c].onlydb | length" "$CONFIG_FILE" 2>/dev/null || echo "0")
            if [[ "$onlydb_count" != "0" ]] && [[ "$onlydb_count" != "null" ]]; then
                db_count="$onlydb_count"
                use_onlydb=true
            else
                # No onlydb list, skip this container in onlydb mode
                continue
            fi
        else
            # Normal mode - use databases list
            db_count=$(yq ".hosts[$i].containers[$c].databases | length" "$CONFIG_FILE" 2>/dev/null || echo "0")
            use_onlydb=false
        fi

        # NOTE(review): yq prints the string "null" for missing keys, which
        # passes the -n tests below; confirm db_user/db_pass are always set
        # in the YAML for containers that list databases.
        if [[ -n "$db_user" ]] && [[ -n "$db_pass" ]] && [[ "$db_count" != "0" ]]; then
            for ((db=0; db<$db_count; db++)); do
                if [[ "$use_onlydb" == "true" ]]; then
                    db_name=$(yq ".hosts[$i].containers[$c].onlydb[$db]" "$CONFIG_FILE" | tr -d '"')
                else
                    db_name=$(yq ".hosts[$i].containers[$c].databases[$db]" "$CONFIG_FILE" | tr -d '"')
                fi

                # The special name ALL expands to every database on the server
                # except the MySQL system schemas.
                if [[ "$db_name" == "ALL" ]]; then
                    echo " Fetching all databases..." | tee -a "$LOG_FILE"

                    # Get database list
                    # Credentials go through a mode-600 defaults file inside the
                    # container so the password never hits a remote command line.
                    # The here-doc lines are part of the remote script string and
                    # must stay at column 0 (EOF delimiter).
                    if [[ "$ssh_user" != "root" ]]; then
                        db_list=$(ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 "$ssh_user@$host_ip" \
"sudo incus exec $container_name -- bash -c 'cat > /tmp/d6back.cnf << EOF
[client]
user=$db_user
password=$db_pass
host=$db_host
EOF
chmod 600 /tmp/d6back.cnf
mariadb --defaults-extra-file=/tmp/d6back.cnf -e \"SHOW DATABASES;\" 2>/dev/null
rm -f /tmp/d6back.cnf'" | \
                            grep -Ev '^(Database|information_schema|performance_schema|mysql|sys)$' || echo "")
                    else
                        db_list=$(ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 "$ssh_user@$host_ip" \
"incus exec $container_name -- bash -c 'cat > /tmp/d6back.cnf << EOF
[client]
user=$db_user
password=$db_pass
host=$db_host
EOF
chmod 600 /tmp/d6back.cnf
mariadb --defaults-extra-file=/tmp/d6back.cnf -e \"SHOW DATABASES;\" 2>/dev/null
rm -f /tmp/d6back.cnf'" | \
                            grep -Ev '^(Database|information_schema|performance_schema|mysql|sys)$' || echo "")
                    fi

                    # Backup each database
                    for single_db in $db_list; do
                        backup_database "$single_db"
                    done
                else
                    backup_database "$db_name"
                fi
            done
        fi
    done
done
|
|
|
|
echo "=== Backup Completed $(date) ===" | tee -a "$LOG_FILE"

# Cleanup old backups according to retention policy
cleanup_old_backups

# Show summary
total_size=$(du -sh "$DIR_BACKUP" 2>/dev/null | cut -f1)
echo "Total backup size: $total_size" | tee -a "$LOG_FILE"

# Add summary to recap
echo "" >> "$RECAP_FILE"
echo "========================================" >> "$RECAP_FILE"

# Add size details per host/container
echo "BACKUP SIZES:" >> "$RECAP_FILE"
for host_dir in "$DIR_BACKUP"/*; do
    if [[ -d "$host_dir" ]]; then
        host_name=$(basename "$host_dir")
        host_size=$(du -sh "$host_dir" 2>/dev/null | cut -f1)
        echo "" >> "$RECAP_FILE"
        echo " $host_name: $host_size" >> "$RECAP_FILE"

        # Size per container
        for container_dir in "$host_dir"/*; do
            if [[ -d "$container_dir" ]]; then
                container_name=$(basename "$container_dir")
                container_size=$(du -sh "$container_dir" 2>/dev/null | cut -f1)
                echo " - $container_name: $container_size" >> "$RECAP_FILE"
            fi
        done
    fi
done

echo "" >> "$RECAP_FILE"
echo "TOTAL SIZE: $total_size" >> "$RECAP_FILE"
echo "COMPLETED: $(date '+%d.%m.%Y %H:%M')" >> "$RECAP_FILE"

# Prepare email subject with date format
DATE_SUBJECT=$(date '+%d.%m.%Y %H')

# Mail the recap file via msmtp (previously this block was duplicated for the
# success and error branches; factored into one helper with identical output).
#   $1 - full Subject: line content
#   $2 - label used in the local log messages ("ERROR" / "SUCCESS recap")
send_recap_email() {
    local subject="$1"
    local label="$2"
    if command -v msmtp &> /dev/null; then
        {
            echo "To: $EMAIL_TO"
            echo "Subject: $subject"
            echo ""
            cat "$RECAP_FILE"
        } | msmtp "$EMAIL_TO"
        echo "$label email sent successfully to $EMAIL_TO" | tee -a "$LOG_FILE"
    else
        echo "WARNING: msmtp not found - $label email NOT sent" | tee -a "$LOG_FILE"
    fi
}

# Send recap email
if [[ $ERROR_COUNT -gt 0 ]]; then
    echo "Total errors: $ERROR_COUNT" | tee -a "$LOG_FILE"

    # Add errors to recap
    echo "" >> "$RECAP_FILE"
    echo "ERRORS DETECTED: $ERROR_COUNT" >> "$RECAP_FILE"
    echo "----------------------------" >> "$RECAP_FILE"
    # NOTE(review): LOG_FILE is per-day and appended, so this also pulls
    # ERROR lines from earlier runs on the same day.
    grep -i "ERROR" "$LOG_FILE" >> "$RECAP_FILE"

    # Send email with ERROR in subject
    echo "Sending ERROR email to $EMAIL_TO (Errors found: $ERROR_COUNT)" | tee -a "$LOG_FILE"
    send_recap_email "Backup${BACKUP_SERVER} ERROR $DATE_SUBJECT" "ERROR"
else
    echo "Backup completed successfully with no errors" | tee -a "$LOG_FILE"

    # Send success recap email
    echo "Sending SUCCESS recap email to $EMAIL_TO" | tee -a "$LOG_FILE"
    send_recap_email "Backup${BACKUP_SERVER} $DATE_SUBJECT" "SUCCESS recap"
fi

# Clean up recap file
rm -f "$RECAP_FILE"

# Exit with error code if there were errors
if [[ $ERROR_COUNT -gt 0 ]]; then
    exit 1
fi
|