feat: Version 3.5.2 - Configuration Stripe et gestion des immeubles
- Configuration complète Stripe pour les 3 environnements (DEV/REC/PROD) * DEV: Clés TEST Pierre (mode test) * REC: Clés TEST Client (mode test) * PROD: Clés LIVE Client (mode live) - Ajout de la gestion des bases de données immeubles/bâtiments * Configuration buildings_database pour DEV/REC/PROD * Service BuildingService pour enrichissement des adresses - Optimisations pages et améliorations ergonomie - Mises à jour des dépendances Composer - Nettoyage des fichiers obsolètes 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
651
api/PM7/d6back.sh
Normal file
651
api/PM7/d6back.sh
Normal file
@@ -0,0 +1,651 @@
|
||||
#!/bin/bash
# d6back.sh - Backs up directories and MariaDB databases from remote Incus
# containers (declared in d6back.yaml) into a local backup tree, encrypting
# SQL dumps with openssl, then applies a retention policy and emails a recap.
# Usage: ./d6back.sh [-onlydb]

set -uo pipefail
# Note: Removed -e to allow script to continue on errors
# Errors are handled explicitly with ERROR_COUNT

# Parse command line arguments
ONLY_DB=false
if [[ "${1:-}" == "-onlydb" ]]; then
    ONLY_DB=true
    echo "Mode: Database backup only"
fi

# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CONFIG_FILE="$SCRIPT_DIR/d6back.yaml"
LOG_DIR="$SCRIPT_DIR/logs"
mkdir -p "$LOG_DIR"
LOG_FILE="$LOG_DIR/d6back-$(date +%Y%m%d).log"   # one log file per day
ERROR_COUNT=0                                    # drives email subject and final exit code
RECAP_FILE="/tmp/backup_recap_$$.txt"            # per-run recap body, emailed then deleted

# Lock file to prevent concurrent executions
# NOTE(review): creating files under /var/lock usually needs root - confirm
# the user running this (cron?) can write there.
LOCK_FILE="/var/lock/d6back.lock"
exec 200>"$LOCK_FILE"
if ! flock -n 200; then
    echo "ERROR: Another backup is already running" >&2
    exit 1
fi
trap 'flock -u 200' EXIT

# Clean old log files (keep only last 10)
find "$LOG_DIR" -maxdepth 1 -name "d6back-*.log" -type f 2>/dev/null | sort -r | tail -n +11 | xargs -r rm -f || true
# Check dependencies - COMMENTED OUT
# for cmd in yq ssh tar openssl; do
# if ! command -v "$cmd" &> /dev/null; then
# echo "ERROR: $cmd is required but not installed" | tee -a "$LOG_FILE"
# exit 1
# fi
# done

# Load config
# yq output is stripped of surrounding double quotes with tr; values must not
# themselves contain '"'.
DIR_BACKUP=$(yq '.global.dir_backup' "$CONFIG_FILE" | tr -d '"')
ENC_KEY_PATH=$(yq '.global.enc_key' "$CONFIG_FILE" | tr -d '"')
BACKUP_SERVER=$(yq '.global.backup_server // "BACKUP"' "$CONFIG_FILE" | tr -d '"')
EMAIL_TO=$(yq '.global.email_to // "support@unikoffice.com"' "$CONFIG_FILE" | tr -d '"')
KEEP_DIRS=$(yq '.global.keep_dirs' "$CONFIG_FILE" | tr -d '"')   # e.g. "7d"
KEEP_DB=$(yq '.global.keep_db' "$CONFIG_FILE" | tr -d '"')       # e.g. "5d,3w,15m"

# Load encryption key
if [[ ! -f "$ENC_KEY_PATH" ]]; then
    echo "ERROR: Encryption key not found: $ENC_KEY_PATH" | tee -a "$LOG_FILE"
    exit 1
fi
ENC_KEY=$(cat "$ENC_KEY_PATH")
echo "=== Backup Started $(date) ===" | tee -a "$LOG_FILE"
echo "Backup directory: $DIR_BACKUP" | tee -a "$LOG_FILE"

# Check available disk space
DISK_USAGE=$(df "$DIR_BACKUP" | tail -1 | awk '{print $5}' | sed 's/%//')
DISK_FREE=$((100 - DISK_USAGE))

# Warn (but continue) when the backup partition has less than 20% free.
if [[ $DISK_FREE -lt 20 ]]; then
    echo "WARNING: Low disk space! Only ${DISK_FREE}% free on backup partition" | tee -a "$LOG_FILE"

    # Send warning email
    echo "Sending DISK SPACE WARNING email to $EMAIL_TO (${DISK_FREE}% free)" | tee -a "$LOG_FILE"
    if command -v msmtp &> /dev/null; then
        {
            echo "To: $EMAIL_TO"
            echo "Subject: Backup${BACKUP_SERVER} WARNING - Low disk space (${DISK_FREE}% free)"
            echo ""
            echo "WARNING: Low disk space on $(hostname)"
            echo ""
            echo "Backup directory: $DIR_BACKUP"
            echo "Disk usage: ${DISK_USAGE}%"
            echo "Free space: ${DISK_FREE}%"
            echo ""
            echo "The backup will continue but please free up some space soon."
            echo ""
            echo "Date: $(date '+%d.%m.%Y %H:%M')"
        } | msmtp "$EMAIL_TO"
        echo "DISK SPACE WARNING email sent successfully to $EMAIL_TO" | tee -a "$LOG_FILE"
    else
        echo "WARNING: msmtp not found - DISK WARNING email NOT sent" | tee -a "$LOG_FILE"
    fi
else
    echo "Disk space OK: ${DISK_FREE}% free" | tee -a "$LOG_FILE"
fi

# Initialize recap file
echo "BACKUP REPORT - $(hostname) - $(date '+%d.%m.%Y %H')h" > "$RECAP_FILE"
echo "========================================" >> "$RECAP_FILE"
echo "" >> "$RECAP_FILE"
# Print a file's size in whole megabytes, grouping digits by three with '.'
# as separator (e.g. 1234 -> "1.234"). Prints "0" when the file is missing.
format_size_mb() {
    local path="$1"
    if [[ ! -f "$path" ]]; then
        echo "0"
        return
    fi
    local kilobytes
    kilobytes=$(du -k "$path" | cut -f1)
    local grouped=$((kilobytes / 1024))
    # Repeatedly insert a '.' before the last ungrouped run of three digits.
    while [[ "$grouped" =~ ^([0-9]+)([0-9]{3})((\.[0-9]{3})*)$ ]]; do
        grouped="${BASH_REMATCH[1]}.${BASH_REMATCH[2]}${BASH_REMATCH[3]}"
    done
    echo "$grouped"
}
# Print a file's age in whole days, based on its mtime. A file whose mtime
# cannot be read is treated as epoch 0, i.e. a very large age.
get_age_days() {
    local target="$1"
    local mtime
    mtime=$(stat -c %Y "$target" 2>/dev/null || echo 0)
    echo $(( ($(date +%s) - mtime) / 86400 ))
}
# Print "YYYY-WW" (year plus %W week-of-year, weeks starting Monday) for a
# file's mtime; unreadable mtimes fall back to the epoch.
get_week_year() {
    local target="$1"
    local mtime
    mtime=$(stat -c %Y "$target" 2>/dev/null || echo 0)
    date -d "@$mtime" +"%Y-%W"
}
# Function to cleanup old backups according to retention policy
#
# Walks $DIR_BACKUP/<host>/ and deletes:
#  - directory archives (*.tar.gz) older than KEEP_DIRS days;
#  - encrypted SQL dumps (*.sql.gz.enc), per database: keep everything inside
#    the daily window, one per calendar day inside the weekly window, one per
#    week inside the monthly window, and nothing beyond that.
# Globals read: DIR_BACKUP KEEP_DIRS KEEP_DB LOG_FILE RECAP_FILE
# Outputs: progress to stdout/log; a summary appended to the recap file.
cleanup_old_backups() {
    local DELETED_COUNT=0
    local KEPT_COUNT=0

    echo "" | tee -a "$LOG_FILE"
    echo "=== Starting Backup Retention Cleanup ===" | tee -a "$LOG_FILE"

    # Parse retention periods
    local KEEP_DIRS_DAYS=${KEEP_DIRS%d} # Remove 'd' suffix

    # Parse database retention (5d,3w,15m)
    IFS=',' read -r KEEP_DB_DAILY KEEP_DB_WEEKLY KEEP_DB_MONTHLY <<< "$KEEP_DB"
    local KEEP_DB_DAILY_DAYS=${KEEP_DB_DAILY%d}
    local KEEP_DB_WEEKLY_WEEKS=${KEEP_DB_WEEKLY%w}
    local KEEP_DB_MONTHLY_MONTHS=${KEEP_DB_MONTHLY%m}

    # Convert to days (months approximated as 30 days)
    local KEEP_DB_WEEKLY_DAYS=$((KEEP_DB_WEEKLY_WEEKS * 7))
    local KEEP_DB_MONTHLY_DAYS=$((KEEP_DB_MONTHLY_MONTHS * 30))

    echo "Retention policy: dirs=${KEEP_DIRS_DAYS}d, db=${KEEP_DB_DAILY_DAYS}d/${KEEP_DB_WEEKLY_WEEKS}w/${KEEP_DB_MONTHLY_MONTHS}m" | tee -a "$LOG_FILE"

    # Process each host directory
    for host_dir in "$DIR_BACKUP"/*; do
        if [[ ! -d "$host_dir" ]]; then
            continue
        fi

        local host_name=$(basename "$host_dir")
        echo " Cleaning host: $host_name" | tee -a "$LOG_FILE"

        # Clean directory backups (*.tar.gz but not *.sql.gz.enc)
        while IFS= read -r -d '' file; do
            if [[ $(basename "$file") == *".sql.gz.enc" ]]; then
                continue # Skip SQL files
            fi

            local age_days=$(get_age_days "$file")

            if [[ $age_days -gt $KEEP_DIRS_DAYS ]]; then
                rm -f "$file"
                echo " Deleted: $(basename "$file") (${age_days}d > ${KEEP_DIRS_DAYS}d)" | tee -a "$LOG_FILE"
                ((DELETED_COUNT++))
            else
                ((KEPT_COUNT++))
            fi
        done < <(find "$host_dir" -name "*.tar.gz" -type f -print0 2>/dev/null)

        # Clean database backups with retention policy
        # Group dump files by database name (filename prefix before first '_').
        declare -A db_files

        while IFS= read -r -d '' file; do
            local filename=$(basename "$file")
            local db_name=${filename%%_*}

            if [[ -z "${db_files[$db_name]:-}" ]]; then
                db_files[$db_name]="$file"
            else
                db_files[$db_name]+=$'\n'"$file"
            fi
        done < <(find "$host_dir" -name "*.sql.gz.enc" -type f -print0 2>/dev/null)

        # Process each database
        for db_name in "${!db_files[@]}"; do
            # Sort files by age (newest first)
            # NOTE(review): the xargs/stat pipeline breaks on paths containing
            # whitespace - confirm backup paths never contain spaces.
            mapfile -t files < <(echo "${db_files[$db_name]}" | while IFS= read -r f; do
                echo "$f"
            done | xargs -I {} stat -c "%Y {}" {} 2>/dev/null | sort -rn | cut -d' ' -f2-)

            # Track which files to keep (one slot per day / per week)
            declare -A keep_daily
            declare -A keep_weekly

            for file in "${files[@]}"; do
                local age_days=$(get_age_days "$file")

                if [[ $age_days -le $KEEP_DB_DAILY_DAYS ]]; then
                    # Keep all files within daily retention
                    ((KEPT_COUNT++))

                elif [[ $age_days -le $KEEP_DB_WEEKLY_DAYS ]]; then
                    # Weekly retention: keep one per day (newest wins, since
                    # files are iterated newest first)
                    local file_date=$(date -d "@$(stat -c %Y "$file")" +"%Y-%m-%d")

                    if [[ -z "${keep_daily[$file_date]:-}" ]]; then
                        keep_daily[$file_date]="$file"
                        ((KEPT_COUNT++))
                    else
                        rm -f "$file"
                        ((DELETED_COUNT++))
                    fi

                elif [[ $age_days -le $KEEP_DB_MONTHLY_DAYS ]]; then
                    # Monthly retention: keep one per week
                    local week_year=$(get_week_year "$file")

                    if [[ -z "${keep_weekly[$week_year]:-}" ]]; then
                        keep_weekly[$week_year]="$file"
                        ((KEPT_COUNT++))
                    else
                        rm -f "$file"
                        ((DELETED_COUNT++))
                    fi

                else
                    # Beyond retention period
                    rm -f "$file"
                    echo " Deleted: $(basename "$file") (${age_days}d > ${KEEP_DB_MONTHLY_DAYS}d)" | tee -a "$LOG_FILE"
                    ((DELETED_COUNT++))
                fi
            done

            unset keep_daily keep_weekly
        done

        unset db_files
    done

    echo "Cleanup completed: ${DELETED_COUNT} deleted, ${KEPT_COUNT} kept" | tee -a "$LOG_FILE"

    # Add cleanup summary to recap file
    echo "" >> "$RECAP_FILE"
    echo "CLEANUP SUMMARY:" >> "$RECAP_FILE"
    echo " Files deleted: $DELETED_COUNT" >> "$RECAP_FILE"
    echo " Files kept: $KEPT_COUNT" >> "$RECAP_FILE"
}
# Function to backup a single database (must be defined before use)
#
# Dumps one MariaDB database from the current container over SSH, gzips it on
# the remote side, encrypts the stream locally with openssl, then validates
# the result (size check plus decrypt/gunzip -t round-trip).
# Arguments: $1 - database name
# Globals read: backup_dir ssh_user ssh_key ssh_port host_ip host_name
#               container_name db_user db_pass db_host ENC_KEY LOG_FILE RECAP_FILE
# Globals written: ERROR_COUNT CMD_PREFIX MYSQL_TEST
backup_database() {
    local database="$1"
    local timestamp="$(date +%Y%m%d_%H)"
    local backup_file="$backup_dir/sql/${database}_${timestamp}.sql.gz.enc"

    echo " Backing up database: $database" | tee -a "$LOG_FILE"

    # incus needs root; prefix with sudo when connecting as a regular user
    if [[ "$ssh_user" != "root" ]]; then
        CMD_PREFIX="sudo"
    else
        CMD_PREFIX=""
    fi

    # Execute backup with encryption
    # First test MySQL connection to get clear error messages (|| true to continue on error)
    # Credentials are written to a short-lived /tmp/d6back.cnf inside the
    # container so the password never appears on a remote command line.
    MYSQL_TEST=$(ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 "$ssh_user@$host_ip" \
        "$CMD_PREFIX incus exec $container_name -- bash -c 'cat > /tmp/d6back.cnf << EOF
[client]
user=$db_user
password=$db_pass
host=$db_host
EOF
chmod 600 /tmp/d6back.cnf
mariadb --defaults-extra-file=/tmp/d6back.cnf -e \"SELECT 1\" 2>&1
rm -f /tmp/d6back.cnf'" 2>/dev/null || true)

    # Dump with --databases, rewriting the CREATE DATABASE/USE statements to a
    # timestamped name so a restore never overwrites the live database; gzip
    # remotely, encrypt locally.
    if ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 "$ssh_user@$host_ip" \
        "$CMD_PREFIX incus exec $container_name -- bash -c 'cat > /tmp/d6back.cnf << EOF
[client]
user=$db_user
password=$db_pass
host=$db_host
EOF
chmod 600 /tmp/d6back.cnf
mariadb-dump --defaults-extra-file=/tmp/d6back.cnf --single-transaction --lock-tables=false --add-drop-table --create-options --databases $database 2>/dev/null | sed -e \"/^CREATE DATABASE/s/\\\`$database\\\`/\\\`${database}_${timestamp}\\\`/\" -e \"/^USE/s/\\\`$database\\\`/\\\`${database}_${timestamp}\\\`/\" | gzip
rm -f /tmp/d6back.cnf'" | \
        openssl enc -aes-256-cbc -salt -pass pass:"$ENC_KEY" -pbkdf2 > "$backup_file" 2>/dev/null; then

        # Validate backup file size (encrypted SQL should be > 100 bytes)
        if [[ -f "$backup_file" ]]; then
            file_size=$(stat -c%s "$backup_file" 2>/dev/null || echo 0)
            if [[ $file_size -lt 100 ]]; then
                # Analyze MySQL connection test results
                if [[ "$MYSQL_TEST" == *"Access denied"* ]]; then
                    echo " ERROR: MySQL authentication failed for $database on $host_name/$container_name" | tee -a "$LOG_FILE"
                    echo " User: $db_user@$db_host - Check password in configuration" | tee -a "$LOG_FILE"
                elif [[ "$MYSQL_TEST" == *"Unknown database"* ]]; then
                    echo " ERROR: Database '$database' does not exist on $host_name/$container_name" | tee -a "$LOG_FILE"
                elif [[ "$MYSQL_TEST" == *"Can't connect"* ]]; then
                    echo " ERROR: Cannot connect to MySQL server at $db_host in $container_name" | tee -a "$LOG_FILE"
                else
                    echo " ERROR: Backup file too small (${file_size} bytes): $database on $host_name/$container_name" | tee -a "$LOG_FILE"
                fi

                ((ERROR_COUNT++))
                rm -f "$backup_file"
            else
                size=$(du -h "$backup_file" | cut -f1)
                size_mb=$(format_size_mb "$backup_file")
                echo " ✓ Saved (encrypted): $(basename "$backup_file") ($size)" | tee -a "$LOG_FILE"
                echo " SQL: $(basename "$backup_file") - ${size_mb} Mo" >> "$RECAP_FILE"

                # Test backup integrity
                # (decrypt + gunzip -t proves the file round-trips)
                if ! openssl enc -aes-256-cbc -d -pass pass:"$ENC_KEY" -pbkdf2 -in "$backup_file" | gunzip -t 2>/dev/null; then
                    echo " ERROR: Backup integrity check failed for $database" | tee -a "$LOG_FILE"
                    ((ERROR_COUNT++))
                fi
            fi
        else
            echo " ERROR: Backup file not created: $database" | tee -a "$LOG_FILE"
            ((ERROR_COUNT++))
        fi
    else
        # Analyze MySQL connection test for failed backup
        if [[ "$MYSQL_TEST" == *"Access denied"* ]]; then
            echo " ERROR: MySQL authentication failed for $database on $host_name/$container_name" | tee -a "$LOG_FILE"
            echo " User: $db_user@$db_host - Check password in configuration" | tee -a "$LOG_FILE"
        elif [[ "$MYSQL_TEST" == *"Unknown database"* ]]; then
            echo " ERROR: Database '$database' does not exist on $host_name/$container_name" | tee -a "$LOG_FILE"
        elif [[ "$MYSQL_TEST" == *"Can't connect"* ]]; then
            echo " ERROR: Cannot connect to MySQL server at $db_host in $container_name" | tee -a "$LOG_FILE"
        else
            echo " ERROR: Failed to backup database $database on $host_name/$container_name" | tee -a "$LOG_FILE"
        fi

        ((ERROR_COUNT++))
        rm -f "$backup_file"
    fi
}
# Process each host
# Main loop: for every host in d6back.yaml, test SSH, then for each container
# back up its configured directories (unless -onlydb) and its databases.
host_count=$(yq '.hosts | length' "$CONFIG_FILE")

for ((i=0; i<$host_count; i++)); do
    host_name=$(yq ".hosts[$i].name" "$CONFIG_FILE" | tr -d '"')
    host_ip=$(yq ".hosts[$i].ip" "$CONFIG_FILE" | tr -d '"')
    ssh_user=$(yq ".hosts[$i].user" "$CONFIG_FILE" | tr -d '"')
    ssh_key=$(yq ".hosts[$i].key" "$CONFIG_FILE" | tr -d '"')
    ssh_port=$(yq ".hosts[$i].port // 22" "$CONFIG_FILE" | tr -d '"')

    echo "Processing host: $host_name ($host_ip)" | tee -a "$LOG_FILE"
    echo "" >> "$RECAP_FILE"
    echo "HOST: $host_name ($host_ip)" >> "$RECAP_FILE"
    echo "----------------------------" >> "$RECAP_FILE"

    # Test SSH connection (skip the whole host on failure)
    if ! ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 -o StrictHostKeyChecking=no "$ssh_user@$host_ip" "true" 2>/dev/null; then
        echo " ERROR: Cannot connect to $host_name ($host_ip)" | tee -a "$LOG_FILE"
        ((ERROR_COUNT++))
        continue
    fi

    # Process containers
    container_count=$(yq ".hosts[$i].containers | length" "$CONFIG_FILE" 2>/dev/null || echo "0")

    for ((c=0; c<$container_count; c++)); do
        container_name=$(yq ".hosts[$i].containers[$c].name" "$CONFIG_FILE" | tr -d '"')

        echo " Processing container: $container_name" | tee -a "$LOG_FILE"

        # Add container to recap
        echo "" >> "$RECAP_FILE"
        echo " Container: $container_name" >> "$RECAP_FILE"

        # Create backup directories ($DIR_BACKUP/<host>/<container>[/sql])
        backup_dir="$DIR_BACKUP/$host_name/$container_name"
        mkdir -p "$backup_dir"
        mkdir -p "$backup_dir/sql"

        # Backup directories (skip if -onlydb mode)
        if [[ "$ONLY_DB" == "false" ]]; then
            dir_count=$(yq ".hosts[$i].containers[$c].dirs | length" "$CONFIG_FILE" 2>/dev/null || echo "0")

            for ((d=0; d<$dir_count; d++)); do
                dir_path=$(yq ".hosts[$i].containers[$c].dirs[$d]" "$CONFIG_FILE" | sed 's/^"\|"$//g')

                # Use sudo if not root
                if [[ "$ssh_user" != "root" ]]; then
                    CMD_PREFIX="sudo"
                else
                    CMD_PREFIX=""
                fi

                # Special handling for /var/www - backup each subdirectory separately
                if [[ "$dir_path" == "/var/www" ]]; then
                    echo " Backing up subdirectories of $dir_path" | tee -a "$LOG_FILE"

                    # Get list of subdirectories
                    subdirs=$(ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 "$ssh_user@$host_ip" \
                        "$CMD_PREFIX incus exec $container_name -- find /var/www -maxdepth 1 -type d ! -path /var/www" 2>/dev/null || echo "")

                    for subdir in $subdirs; do
                        subdir_name=$(basename "$subdir" | tr '/' '_')
                        backup_file="$backup_dir/www_${subdir_name}_$(date +%Y%m%d_%H).tar.gz"

                        echo " Backing up: $subdir" | tee -a "$LOG_FILE"

                        # tar streams over ssh straight into the local file
                        if ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 "$ssh_user@$host_ip" \
                            "$CMD_PREFIX incus exec $container_name -- tar czf - $subdir 2>/dev/null" > "$backup_file"; then

                            # Validate backup file size (tar.gz should be > 1KB)
                            if [[ -f "$backup_file" ]]; then
                                file_size=$(stat -c%s "$backup_file" 2>/dev/null || echo 0)
                                if [[ $file_size -lt 1024 ]]; then
                                    echo " WARNING: Backup file very small (${file_size} bytes): $subdir" | tee -a "$LOG_FILE"
                                    # Keep the file but note it's small
                                    size=$(du -h "$backup_file" | cut -f1)
                                    size_mb=$(format_size_mb "$backup_file")
                                    echo " ✓ Saved (small): $(basename "$backup_file") ($size)" | tee -a "$LOG_FILE"
                                    echo " DIR: $(basename "$backup_file") - ${size_mb} Mo (WARNING: small)" >> "$RECAP_FILE"
                                else
                                    size=$(du -h "$backup_file" | cut -f1)
                                    size_mb=$(format_size_mb "$backup_file")
                                    echo " ✓ Saved: $(basename "$backup_file") ($size)" | tee -a "$LOG_FILE"
                                    echo " DIR: $(basename "$backup_file") - ${size_mb} Mo" >> "$RECAP_FILE"
                                fi

                                # Test tar integrity
                                if ! tar tzf "$backup_file" >/dev/null 2>&1; then
                                    echo " ERROR: Tar integrity check failed" | tee -a "$LOG_FILE"
                                    ((ERROR_COUNT++))
                                fi
                            else
                                echo " ERROR: Backup file not created: $subdir" | tee -a "$LOG_FILE"
                                ((ERROR_COUNT++))
                            fi
                        else
                            echo " ERROR: Failed to backup $subdir" | tee -a "$LOG_FILE"
                            ((ERROR_COUNT++))
                            rm -f "$backup_file"
                        fi
                    done
                else
                    # Normal backup for other directories
                    dir_name=$(basename "$dir_path" | tr '/' '_')
                    backup_file="$backup_dir/${dir_name}_$(date +%Y%m%d_%H).tar.gz"

                    echo " Backing up: $dir_path" | tee -a "$LOG_FILE"

                    if ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 "$ssh_user@$host_ip" \
                        "$CMD_PREFIX incus exec $container_name -- tar czf - $dir_path 2>/dev/null" > "$backup_file"; then

                        # Validate backup file size (tar.gz should be > 1KB)
                        if [[ -f "$backup_file" ]]; then
                            file_size=$(stat -c%s "$backup_file" 2>/dev/null || echo 0)
                            if [[ $file_size -lt 1024 ]]; then
                                echo " WARNING: Backup file very small (${file_size} bytes): $dir_path" | tee -a "$LOG_FILE"
                                # Keep the file but note it's small
                                size=$(du -h "$backup_file" | cut -f1)
                                size_mb=$(format_size_mb "$backup_file")
                                echo " ✓ Saved (small): $(basename "$backup_file") ($size)" | tee -a "$LOG_FILE"
                                echo " DIR: $(basename "$backup_file") - ${size_mb} Mo (WARNING: small)" >> "$RECAP_FILE"
                            else
                                size=$(du -h "$backup_file" | cut -f1)
                                size_mb=$(format_size_mb "$backup_file")
                                echo " ✓ Saved: $(basename "$backup_file") ($size)" | tee -a "$LOG_FILE"
                                echo " DIR: $(basename "$backup_file") - ${size_mb} Mo" >> "$RECAP_FILE"
                            fi

                            # Test tar integrity
                            if ! tar tzf "$backup_file" >/dev/null 2>&1; then
                                echo " ERROR: Tar integrity check failed" | tee -a "$LOG_FILE"
                                ((ERROR_COUNT++))
                            fi
                        else
                            echo " ERROR: Backup file not created: $dir_path" | tee -a "$LOG_FILE"
                            ((ERROR_COUNT++))
                        fi
                    else
                        echo " ERROR: Failed to backup $dir_path" | tee -a "$LOG_FILE"
                        ((ERROR_COUNT++))
                        rm -f "$backup_file"
                    fi
                fi
            done
        fi # End of directory backup section

        # Backup databases
        db_user=$(yq ".hosts[$i].containers[$c].db_user" "$CONFIG_FILE" 2>/dev/null | tr -d '"')
        db_pass=$(yq ".hosts[$i].containers[$c].db_pass" "$CONFIG_FILE" 2>/dev/null | tr -d '"')
        db_host=$(yq ".hosts[$i].containers[$c].db_host // \"localhost\"" "$CONFIG_FILE" 2>/dev/null | tr -d '"')

        # Check if we're in onlydb mode
        if [[ "$ONLY_DB" == "true" ]]; then
            # Use onlydb list if it exists
            onlydb_count=$(yq ".hosts[$i].containers[$c].onlydb | length" "$CONFIG_FILE" 2>/dev/null || echo "0")
            if [[ "$onlydb_count" != "0" ]] && [[ "$onlydb_count" != "null" ]]; then
                db_count="$onlydb_count"
                use_onlydb=true
            else
                # No onlydb list, skip this container in onlydb mode
                continue
            fi
        else
            # Normal mode - use databases list
            db_count=$(yq ".hosts[$i].containers[$c].databases | length" "$CONFIG_FILE" 2>/dev/null || echo "0")
            use_onlydb=false
        fi

        if [[ -n "$db_user" ]] && [[ -n "$db_pass" ]] && [[ "$db_count" != "0" ]]; then
            for ((db=0; db<$db_count; db++)); do
                if [[ "$use_onlydb" == "true" ]]; then
                    db_name=$(yq ".hosts[$i].containers[$c].onlydb[$db]" "$CONFIG_FILE" | tr -d '"')
                else
                    db_name=$(yq ".hosts[$i].containers[$c].databases[$db]" "$CONFIG_FILE" | tr -d '"')
                fi

                # "ALL" expands to every non-system database on the server
                if [[ "$db_name" == "ALL" ]]; then
                    echo " Fetching all databases..." | tee -a "$LOG_FILE"

                    # Get database list (credentials via a temp defaults file,
                    # same pattern as backup_database)
                    if [[ "$ssh_user" != "root" ]]; then
                        db_list=$(ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 "$ssh_user@$host_ip" \
                            "sudo incus exec $container_name -- bash -c 'cat > /tmp/d6back.cnf << EOF
[client]
user=$db_user
password=$db_pass
host=$db_host
EOF
chmod 600 /tmp/d6back.cnf
mariadb --defaults-extra-file=/tmp/d6back.cnf -e \"SHOW DATABASES;\" 2>/dev/null
rm -f /tmp/d6back.cnf'" | \
                            grep -Ev '^(Database|information_schema|performance_schema|mysql|sys)$' || echo "")
                    else
                        db_list=$(ssh -i "$ssh_key" -p "$ssh_port" -o ConnectTimeout=20 "$ssh_user@$host_ip" \
                            "incus exec $container_name -- bash -c 'cat > /tmp/d6back.cnf << EOF
[client]
user=$db_user
password=$db_pass
host=$db_host
EOF
chmod 600 /tmp/d6back.cnf
mariadb --defaults-extra-file=/tmp/d6back.cnf -e \"SHOW DATABASES;\" 2>/dev/null
rm -f /tmp/d6back.cnf'" | \
                            grep -Ev '^(Database|information_schema|performance_schema|mysql|sys)$' || echo "")
                    fi

                    # Backup each database
                    for single_db in $db_list; do
                        backup_database "$single_db"
                    done
                else
                    backup_database "$db_name"
                fi
            done
        fi
    done
done
echo "=== Backup Completed $(date) ===" | tee -a "$LOG_FILE"

# Cleanup old backups according to retention policy
cleanup_old_backups

# Show summary
total_size=$(du -sh "$DIR_BACKUP" 2>/dev/null | cut -f1)
echo "Total backup size: $total_size" | tee -a "$LOG_FILE"

# Add summary to recap
echo "" >> "$RECAP_FILE"
echo "========================================" >> "$RECAP_FILE"

# Add size details per host/container
echo "BACKUP SIZES:" >> "$RECAP_FILE"
for host_dir in "$DIR_BACKUP"/*; do
    if [[ -d "$host_dir" ]]; then
        host_name=$(basename "$host_dir")
        host_size=$(du -sh "$host_dir" 2>/dev/null | cut -f1)
        echo "" >> "$RECAP_FILE"
        echo " $host_name: $host_size" >> "$RECAP_FILE"

        # Size per container
        for container_dir in "$host_dir"/*; do
            if [[ -d "$container_dir" ]]; then
                container_name=$(basename "$container_dir")
                container_size=$(du -sh "$container_dir" 2>/dev/null | cut -f1)
                echo " - $container_name: $container_size" >> "$RECAP_FILE"
            fi
        done
    fi
done

echo "" >> "$RECAP_FILE"
echo "TOTAL SIZE: $total_size" >> "$RECAP_FILE"
echo "COMPLETED: $(date '+%d.%m.%Y %H:%M')" >> "$RECAP_FILE"

# Prepare email subject with date format
DATE_SUBJECT=$(date '+%d.%m.%Y %H')

# Send recap email
# ERROR variant (subject + grep of the log) if anything failed, plain recap otherwise.
if [[ $ERROR_COUNT -gt 0 ]]; then
    echo "Total errors: $ERROR_COUNT" | tee -a "$LOG_FILE"

    # Add errors to recap
    echo "" >> "$RECAP_FILE"
    echo "ERRORS DETECTED: $ERROR_COUNT" >> "$RECAP_FILE"
    echo "----------------------------" >> "$RECAP_FILE"
    grep -i "ERROR" "$LOG_FILE" >> "$RECAP_FILE"

    # Send email with ERROR in subject
    echo "Sending ERROR email to $EMAIL_TO (Errors found: $ERROR_COUNT)" | tee -a "$LOG_FILE"
    if command -v msmtp &> /dev/null; then
        {
            echo "To: $EMAIL_TO"
            echo "Subject: Backup${BACKUP_SERVER} ERROR $DATE_SUBJECT"
            echo ""
            cat "$RECAP_FILE"
        } | msmtp "$EMAIL_TO"
        echo "ERROR email sent successfully to $EMAIL_TO" | tee -a "$LOG_FILE"
    else
        echo "WARNING: msmtp not found - ERROR email NOT sent" | tee -a "$LOG_FILE"
    fi
else
    echo "Backup completed successfully with no errors" | tee -a "$LOG_FILE"

    # Send success recap email
    echo "Sending SUCCESS recap email to $EMAIL_TO" | tee -a "$LOG_FILE"
    if command -v msmtp &> /dev/null; then
        {
            echo "To: $EMAIL_TO"
            echo "Subject: Backup${BACKUP_SERVER} $DATE_SUBJECT"
            echo ""
            cat "$RECAP_FILE"
        } | msmtp "$EMAIL_TO"
        echo "SUCCESS recap email sent successfully to $EMAIL_TO" | tee -a "$LOG_FILE"
    else
        echo "WARNING: msmtp not found - SUCCESS recap email NOT sent" | tee -a "$LOG_FILE"
    fi
fi

# Clean up recap file
rm -f "$RECAP_FILE"

# Exit with error code if there were errors
if [[ $ERROR_COUNT -gt 0 ]]; then
    exit 1
fi
112
api/PM7/d6back.yaml
Normal file
112
api/PM7/d6back.yaml
Normal file
@@ -0,0 +1,112 @@
|
||||
# Configuration for MariaDB and directories backup
# Backup structure: $dir_backup/$hostname/$containername/ for dirs
#                   $dir_backup/$hostname/$containername/sql/ for databases
#
# SECURITY NOTE(review): this file contains plaintext DB root passwords -
# keep its permissions restrictive (600) and out of version control.

# Global parameters
global:
  backup_server: PM7 # Backup server name (PM7, PM1, etc.)
  email_to: support@unikoffice.com # Notification email
  dir_backup: /var/pierre/back # Base backup directory
  enc_key: /home/pierre/.key_enc # Encryption key for SQL backups
  keep_dirs: 7d # Keep 7 days for dirs
  keep_db: 5d,3w,15m # 5 full days, 3 weeks (1/day), 15 months (1/week)

# Hosts configuration
hosts:
  - name: IN2
    ip: 145.239.9.105
    user: debian
    key: /home/pierre/.ssh/backup_key
    port: 22
    dirs:
      - /etc/nginx
    containers:
      - name: nx4
        db_user: root
        db_pass: MyDebServer,90b
        db_host: localhost
        dirs:
          - /etc/nginx
          - /var/www
        databases:
          - ALL # Backup all databases
        onlydb: # Used only with -onlydb parameter (optional)
          - turing

  - name: IN3
    ip: 195.154.80.116
    user: pierre
    key: /home/pierre/.ssh/backup_key
    port: 22
    dirs:
      - /etc/nginx
    containers:
      - name: nx4
        db_user: root
        db_pass: MyAlpLocal,90b
        db_host: localhost
        dirs:
          - /etc/nginx
          - /var/www
        databases:
          - ALL # Backup all databases
        onlydb: # Used only with -onlydb parameter (optional)
          - geosector

      # Containers without db_* keys get directory backups only
      - name: rca-geo
        dirs:
          - /etc/nginx
          - /var/www

      - name: dva-res
        db_user: root
        db_pass: MyAlpineDb.90b
        db_host: localhost
        dirs:
          - /etc/nginx
          - /var/www
        databases:
          - ALL
        onlydb:
          - resalice

      - name: dva-front
        dirs:
          - /etc/nginx
          - /var/www

      - name: maria3
        db_user: root
        db_pass: MyAlpLocal,90b
        db_host: localhost
        dirs:
          - /etc/my.cnf.d
          - /var/osm
          - /var/log
        databases:
          - ALL
        onlydb:
          - cleo
          - rca_geo

  - name: IN4
    ip: 51.159.7.190
    user: pierre
    key: /home/pierre/.ssh/backup_key
    port: 22
    dirs:
      - /etc/nginx
    containers:
      - name: maria4
        db_user: root
        db_pass: MyAlpLocal,90b
        db_host: localhost
        dirs:
          - /etc/my.cnf.d
          - /var/osm
          - /var/log
        databases:
          - ALL
        onlydb:
          - cleo
          - pra_geo
118
api/PM7/decpm7.sh
Normal file
118
api/PM7/decpm7.sh
Normal file
@@ -0,0 +1,118 @@
|
||||
#!/bin/bash
# decpm7.sh - Decrypts and decompresses an encrypted SQL backup produced by
# the backup script, writing <name>.sql into the current directory.
# Usage: ./decpm7.sh <database.sql.gz.enc>

set -euo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration
# NOTE(review): relative path - the script must be run from the directory
# containing this YAML. Other scripts in this folder use d6back.yaml; confirm
# backpm7.yaml is the intended config name.
CONFIG_FILE="backpm7.yaml"

# Check if file argument is provided
if [ $# -eq 0 ]; then
    echo -e "${RED}Error: No input file specified${NC}"
    echo "Usage: $0 <database.sql.gz.enc>"
    echo "Example: $0 wordpress_20250905_14.sql.gz.enc"
    exit 1
fi

INPUT_FILE="$1"

# Check if input file exists
if [ ! -f "$INPUT_FILE" ]; then
    echo -e "${RED}Error: File not found: $INPUT_FILE${NC}"
    exit 1
fi
# Function to load encryption key from config
#
# Reads the key file path from .global.enc_key in $CONFIG_FILE (via yq) and
# loads its contents into the global ENC_KEY.
# Globals read: CONFIG_FILE, color variables
# Globals written: ENC_KEY
# Returns: 0 on success, 1 on any failure (a message is printed first).
load_key_from_config() {
    if [ ! -f "$CONFIG_FILE" ]; then
        echo -e "${YELLOW}Warning: $CONFIG_FILE not found${NC}"
        return 1
    fi

    # Check for yq
    if ! command -v yq &> /dev/null; then
        echo -e "${RED}Error: yq is required to read config file${NC}"
        echo "Install with: sudo wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 && sudo chmod +x /usr/local/bin/yq"
        return 1
    fi

    # Declaration split from assignment so a yq failure is not masked by
    # 'local' (which would otherwise always return 0).
    local key_path
    key_path=$(yq '.global.enc_key' "$CONFIG_FILE" | tr -d '"')

    # yq prints the literal string "null" when the key is absent, which would
    # slip past the emptiness check and produce a confusing
    # "key file not found: null" error - treat it as missing here.
    if [ -z "$key_path" ] || [ "$key_path" = "null" ]; then
        echo -e "${RED}Error: enc_key not found in $CONFIG_FILE${NC}"
        return 1
    fi

    if [ ! -f "$key_path" ]; then
        echo -e "${RED}Error: Encryption key file not found: $key_path${NC}"
        return 1
    fi

    ENC_KEY=$(cat "$key_path")
    echo -e "${GREEN}Encryption key loaded from: $key_path${NC}"
    return 0
}
# Check file type early - accept both old and new naming
if [[ "$INPUT_FILE" != *.sql.gz.enc ]] && [[ "$INPUT_FILE" != *.sql.tar.gz.enc ]]; then
    echo -e "${RED}Error: File must be a .sql.gz.enc or .sql.tar.gz.enc file${NC}"
    echo "This tool only decrypts SQL backup files created by backpm7.sh"
    exit 1
fi

# Get encryption key from config
if ! load_key_from_config; then
    echo -e "${RED}Error: Cannot load encryption key${NC}"
    echo "Make sure $CONFIG_FILE exists and contains enc_key path"
    exit 1
fi

# Process SQL backup file
echo -e "${BLUE}Decrypting SQL backup: $INPUT_FILE${NC}"

# Determine output file - extract just the filename and put in current directory
BASENAME=$(basename "$INPUT_FILE")
if [[ "$BASENAME" == *.sql.tar.gz.enc ]]; then
    OUTPUT_FILE="${BASENAME%.sql.tar.gz.enc}.sql"
else
    OUTPUT_FILE="${BASENAME%.sql.gz.enc}.sql"
fi

# Decrypt and decompress in one command
echo "Decrypting to: $OUTPUT_FILE"

# Decrypt and decompress in one pipeline
# (parameters must mirror the encryption side: aes-256-cbc + pbkdf2)
if openssl enc -aes-256-cbc -d -salt -pass pass:"$ENC_KEY" -pbkdf2 -in "$INPUT_FILE" | gunzip > "$OUTPUT_FILE" 2>/dev/null; then
    # Get file size
    size=$(du -h "$OUTPUT_FILE" | cut -f1)
    echo -e "${GREEN}✓ Successfully decrypted: $OUTPUT_FILE ($size)${NC}"

    # Show first few lines of SQL
    echo -e "${BLUE}First 5 lines of SQL:${NC}"
    head -n 5 "$OUTPUT_FILE"
else
    echo -e "${RED}✗ Decryption failed${NC}"
    echo "Possible causes:"
    echo " - Wrong encryption key"
    echo " - Corrupted file"
    echo " - File was encrypted differently"

    # Try to help debug
    echo -e "\n${YELLOW}Debug info:${NC}"
    echo "File size: $(du -h "$INPUT_FILE" | cut -f1)"
    echo "First bytes (should start with 'Salted__'):"
    hexdump -C "$INPUT_FILE" | head -n 1

    # Let's also check what key we're using (first 10 chars)
    # NOTE(review): even a key prefix leaks secret material into terminal
    # scrollback/logs - consider removing this line.
    echo "Key begins with: ${ENC_KEY:0:10}..."

    exit 1
fi

echo -e "${GREEN}Operation completed successfully${NC}"
248
api/PM7/sync_geosector.sh
Normal file
248
api/PM7/sync_geosector.sh
Normal file
@@ -0,0 +1,248 @@
|
||||
#!/bin/bash
#
# sync_geosector.sh - Synchronise les backups geosector depuis PM7 vers maria3 (IN3) et maria4 (IN4)
#
# Ce script :
# 1. Trouve le dernier backup chiffré de geosector sur PM7
# 2. Le déchiffre et décompresse localement
# 3. Le transfère et l'importe dans IN3/maria3/geosector
# 4. Le transfère et l'importe dans IN4/maria4/geosector
#
# Installation: /var/pierre/bat/sync_geosector.sh
# Usage: ./sync_geosector.sh [--force] [--date YYYYMMDD_HH]
#

set -uo pipefail
# Note: -e intentionally omitted so the script can continue on sync errors;
# failures are counted explicitly via ERROR_COUNT and reported at the end.

# Configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CONFIG_FILE="$SCRIPT_DIR/d6back.yaml"
BACKUP_DIR="/var/pierre/back/IN3/nx4/sql"
ENC_KEY_FILE="/home/pierre/.key_enc"
SSH_KEY="/home/pierre/.ssh/backup_key"
# NOTE(review): fixed path under /tmp — concurrent runs would collide;
# consider mktemp -d if this script is ever scheduled in parallel.
TEMP_DIR="/tmp/geosector_sync"
LOG_FILE="/var/pierre/bat/logs/sync_geosector.log"
RECAP_FILE="/tmp/sync_geosector_recap_$$.txt"

# FIX: the log directory was never created, so log() (tee -a) failed on a
# fresh host.  Create it up front; ignore errors (log() will surface them).
mkdir -p "$(dirname "$LOG_FILE")" 2>/dev/null || true

# Load email config from d6back.yaml (fall back to hard-coded defaults)
if [[ -f "$CONFIG_FILE" ]]; then
    EMAIL_TO=$(yq '.global.email_to // "support@unikoffice.com"' "$CONFIG_FILE" | tr -d '"')
    BACKUP_SERVER=$(yq '.global.backup_server // "BACKUP"' "$CONFIG_FILE" | tr -d '"')
else
    EMAIL_TO="support@unikoffice.com"
    BACKUP_SERVER="BACKUP"
fi

# Target servers
IN3_HOST="195.154.80.116"
IN3_USER="pierre"
IN3_CONTAINER="maria3"

IN4_HOST="51.159.7.190"
IN4_USER="pierre"
IN4_CONTAINER="maria4"

# MariaDB credentials
# SECURITY: prefer exporting IN3_DB_PASS / IN4_DB_PASS in the environment;
# the literals below are only fallbacks kept for backward compatibility.
# Hard-coded plaintext passwords in a committed script leak to every
# checkout and backup of the repository.
DB_USER="root"
IN3_DB_PASS="${IN3_DB_PASS:-MyAlpLocal,90b}"   # maria3
IN4_DB_PASS="${IN4_DB_PASS:-MyAlpLocal,90b}"   # maria4
DB_NAME="geosector"
|
||||
|
||||
# Fonctions utilitaires
|
||||
# Append a timestamped line to $LOG_FILE and echo it to stdout.
log() {
    local stamp
    stamp=$(date '+%Y-%m-%d %H:%M:%S')
    printf '%s\n' "[$stamp] $*" | tee -a "$LOG_FILE"
}
|
||||
|
||||
# Log a fatal message (prefixed with "ERROR:") and abort the whole script.
# The EXIT trap still runs cleanup() on this path.
error() {
    local msg="ERROR: $*"
    log "$msg"
    exit 1
}
|
||||
|
||||
# Remove the scratch directory and the recap file.
# Safe to call repeatedly; wired to the EXIT trap below.
cleanup() {
    if [[ -d "$TEMP_DIR" ]]; then
        log "Nettoyage de $TEMP_DIR"
        rm -rf -- "$TEMP_DIR"
    fi
    rm -f -- "$RECAP_FILE"
}
|
||||
|
||||
# cleanup() runs on every exit path (success, error(), Ctrl-C).
trap cleanup EXIT

# Read the encryption key
if [[ ! -f "$ENC_KEY_FILE" ]]; then
    error "Clé de chiffrement non trouvée: $ENC_KEY_FILE"
fi
ENC_KEY=$(<"$ENC_KEY_FILE")

# Parse arguments
FORCE=0           # NOTE: accepted for CLI compatibility but not used below
SPECIFIC_DATE=""

usage() {
    echo "Usage: $0 [--force] [--date YYYYMMDD_HH]"
    exit 1
}

while [[ $# -gt 0 ]]; do
    case $1 in
        --force)
            FORCE=1
            shift
            ;;
        --date)
            # FIX: with set -u, "--date" without a value crashed with an
            # unbound-variable error instead of printing the usage line.
            [[ $# -ge 2 ]] || usage
            SPECIFIC_DATE="$2"
            shift 2
            ;;
        *)
            usage
            ;;
    esac
done

# Locate the backup file to restore
if [[ -n "$SPECIFIC_DATE" ]]; then
    BACKUP_FILE="$BACKUP_DIR/geosector_${SPECIFIC_DATE}.sql.gz.enc"
    if [[ ! -f "$BACKUP_FILE" ]]; then
        error "Backup non trouvé: $BACKUP_FILE"
    fi
else
    # Pick the most recent one (GNU find: '%T@' prints the mtime as epoch)
    BACKUP_FILE=$(find "$BACKUP_DIR" -name "geosector_*.sql.gz.enc" -type f -printf '%T@ %p\n' | sort -rn | head -1 | cut -d' ' -f2-)
    if [[ -z "$BACKUP_FILE" ]]; then
        error "Aucun backup geosector trouvé dans $BACKUP_DIR"
    fi
fi

BACKUP_BASENAME=$(basename "$BACKUP_FILE")
log "Backup sélectionné: $BACKUP_BASENAME"

# Initialise the recap file (body of the summary email)
{
    echo "SYNC GEOSECTOR REPORT - $(hostname) - $(date '+%d.%m.%Y %H')h"
    echo "========================================"
    echo ""
    echo "Backup source: $BACKUP_BASENAME"
    echo ""
} > "$RECAP_FILE"

# Create the scratch directory
mkdir -p "$TEMP_DIR"
DECRYPTED_FILE="$TEMP_DIR/geosector.sql"

# Step 1: decrypt and decompress locally
log "Déchiffrement et décompression du backup..."
if ! openssl enc -aes-256-cbc -d -pass pass:"$ENC_KEY" -pbkdf2 -in "$BACKUP_FILE" | gunzip > "$DECRYPTED_FILE"; then
    error "Échec du déchiffrement/décompression"
fi

FILE_SIZE=$(du -h "$DECRYPTED_FILE" | cut -f1)
log "Fichier SQL déchiffré: $FILE_SIZE"

{
    echo "Decrypted SQL size: $FILE_SIZE"
    echo ""
} >> "$RECAP_FILE"

# Error counter, incremented by sync_to_server() on each failure
ERROR_COUNT=0
|
||||
|
||||
#######################################
# Push the decrypted geosector dump into one remote MariaDB instance.
# Globals:
#   SSH_KEY, DB_NAME, DECRYPTED_FILE, RECAP_FILE (read)
#   ERROR_COUNT (incremented on failure)
# Arguments:
#   $1 - remote host (IP/name)
#   $2 - remote SSH user
#   $3 - incus container name running MariaDB
#   $4 - MariaDB root password
#   $5 - human-readable server label for logs/recap
# Returns:
#   0 on success, 1 on any failure (SSH, DB create, import).
# SECURITY NOTE(review): the password travels inside the remote command line
# (-p'$DB_PASS'), so it is visible in `ps` on the target host while mariadb
# runs — consider MYSQL_PWD or a remote option file instead.
#######################################
sync_to_server() {
    local HOST=$1
    local USER=$2       # NOTE: shadows the USER environment variable inside this function
    local CONTAINER=$3
    local DB_PASS=$4
    local SERVER_NAME=$5

    log "=== Synchronisation vers $SERVER_NAME ($HOST) ==="
    echo "TARGET: $SERVER_NAME ($HOST/$CONTAINER)" >> "$RECAP_FILE"

    # SSH connectivity probe before doing any work
    if ! ssh -i "$SSH_KEY" -o ConnectTimeout=10 "$USER@$HOST" "echo 'SSH OK'" &>/dev/null; then
        log "ERROR: Impossible de se connecter à $HOST via SSH"
        echo "  ✗ SSH connection FAILED" >> "$RECAP_FILE"
        # NOTE: ((var++)) returns status 1 when the pre-increment value is 0;
        # harmless here because set -e is deliberately not in effect.
        ((ERROR_COUNT++))
        return 1
    fi

    # Import into MariaDB
    log "Import dans $SERVER_NAME/$CONTAINER/geosector..."

    # Drop and recreate the database on the remote server (full replace sync)
    if ! ssh -i "$SSH_KEY" "$USER@$HOST" "incus exec $CONTAINER --project default -- mariadb -u root -p'$DB_PASS' -e 'DROP DATABASE IF EXISTS $DB_NAME; CREATE DATABASE $DB_NAME CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;'"; then
        log "ERROR: Échec de la création de la base sur $SERVER_NAME"
        echo "  ✗ Database creation FAILED" >> "$RECAP_FILE"
        ((ERROR_COUNT++))
        return 1
    fi

    # Filter and stream the SQL: drop CREATE DATABASE / USE statements that
    # carry a timestamped name (geosector_<digits>) so the dump lands in the
    # plain "geosector" database created above.
    log "Filtrage et import du SQL..."
    if ! sed -e '/^CREATE DATABASE.*geosector_[0-9]/d' \
             -e '/^USE.*geosector_[0-9]/d' \
             "$DECRYPTED_FILE" | \
         ssh -i "$SSH_KEY" "$USER@$HOST" "incus exec $CONTAINER --project default -- mariadb -u root -p'$DB_PASS' $DB_NAME"; then
        log "ERROR: Échec de l'import sur $SERVER_NAME"
        echo "  ✗ SQL import FAILED" >> "$RECAP_FILE"
        ((ERROR_COUNT++))
        return 1
    fi

    log "$SERVER_NAME: Import réussi"
    echo "  ✓ Import SUCCESS" >> "$RECAP_FILE"
    echo "" >> "$RECAP_FILE"
}
|
||||
|
||||
# Sync to IN3/maria3
sync_to_server "$IN3_HOST" "$IN3_USER" "$IN3_CONTAINER" "$IN3_DB_PASS" "IN3/maria3"

# Sync to IN4/maria4
sync_to_server "$IN4_HOST" "$IN4_USER" "$IN4_CONTAINER" "$IN4_DB_PASS" "IN4/maria4"

# Finalize the recap
echo "========================================" >> "$RECAP_FILE"
echo "COMPLETED: $(date '+%d.%m.%Y %H:%M')" >> "$RECAP_FILE"

# Date fragment used in the email subject
DATE_SUBJECT=$(date '+%d.%m.%Y %H')

# send_recap_email SUBJECT — mail $RECAP_FILE to $EMAIL_TO via msmtp.
# FIX: the previous copy-pasted blocks ignored msmtp's exit status and
# logged "sent successfully" even when delivery failed.
send_recap_email() {
    local subject=$1
    if ! command -v msmtp &> /dev/null; then
        log "WARNING: msmtp not found - email NOT sent"
        return 1
    fi
    if {
        echo "To: $EMAIL_TO"
        echo "Subject: $subject"
        echo ""
        cat "$RECAP_FILE"
    } | msmtp "$EMAIL_TO"; then
        log "Recap email sent successfully to $EMAIL_TO"
    else
        log "WARNING: msmtp failed - email NOT sent"
        return 1
    fi
}

# Send the recap email and exit with the overall status
if [[ $ERROR_COUNT -gt 0 ]]; then
    log "Total errors: $ERROR_COUNT"

    # Append the recent errors to the recap.
    # NOTE(review): $LOG_FILE is append-only across runs, so this may pick up
    # errors from previous executions — consider a per-run log file.
    echo "" >> "$RECAP_FILE"
    echo "ERRORS DETECTED: $ERROR_COUNT" >> "$RECAP_FILE"
    echo "----------------------------" >> "$RECAP_FILE"
    grep -i "ERROR" "$LOG_FILE" | tail -20 >> "$RECAP_FILE"

    # Email with ERROR in the subject
    log "Sending ERROR email to $EMAIL_TO (Errors found: $ERROR_COUNT)"
    send_recap_email "Sync${BACKUP_SERVER} ERROR $DATE_SUBJECT"

    log "=== Synchronisation terminée avec des erreurs ==="
    exit 1
else
    log "=== Synchronisation terminée avec succès ==="
    log "Les bases geosector sur maria3 et maria4 sont à jour avec le backup $BACKUP_BASENAME"

    # Success email
    log "Sending SUCCESS recap email to $EMAIL_TO"
    send_recap_email "Sync${BACKUP_SERVER} $DATE_SUBJECT"

    exit 0
fi
|
||||
Reference in New Issue
Block a user