scripts v2.0
All checks were successful
Check scripts syntax / check-scripts-syntax (push) Successful in 35s

This commit is contained in:
2025-07-31 21:02:06 -03:00
parent 68511a6915
commit bb0f653ac5
3 changed files with 1116 additions and 301 deletions

594
clean.sh
View File

@@ -1,74 +1,558 @@
#!/bin/bash #!/bin/bash
### AUTO-UPDATER ### # System Cleanup and Maintenance Script
# Variables #
SERVER_FILE="https://git.ivanch.me/ivanch/server-scripts/raw/branch/main/clean.sh" # Description: Comprehensive system cleanup for Docker containers and Linux systems
SERVER_OK=1 # Features:
# - Self-updating capability
# - Docker resource cleanup (images, containers, volumes, networks)
# - Package manager cache cleanup (APK/APT)
# - System cache and temporary file cleanup
# - Log rotation and cleanup
# - Memory cache optimization
# - Journal cleanup (systemd)
# - Thumbnail and user cache cleanup
# Author: ivanch
# Version: 2.0
set -euo pipefail  # Exit on error, undefined vars, and pipe failures

#==============================================================================
# CONFIGURATION
#==============================================================================

# Color definitions for output formatting
readonly NC='\033[0m'
readonly RED='\033[1;31m'
readonly GREEN='\033[1;32m'
readonly LIGHT_GREEN='\033[1;32m'
readonly LIGHT_BLUE='\033[1;34m'
readonly LIGHT_GREY='\033[0;37m'
readonly YELLOW='\033[1;33m'

# Script configuration (used by the self-updater)
readonly SCRIPT_NAME="clean.sh"
readonly SERVER_BASE_URL="https://git.ivanch.me/ivanch/server-scripts/raw/branch/main"

# Cleanup configuration
readonly LOG_RETENTION_DAYS=30
readonly JOURNAL_RETENTION_DAYS=7
readonly TEMP_DIRS=("/tmp" "/var/tmp")
readonly CACHE_DIRS=("/var/cache" "/root/.cache")

# Auto-update configuration
readonly AUTO_UPDATE_ENABLED=true
# Logging helpers: each prints one colour-coded, prefixed status line.
# log_error writes to stderr; every other helper writes to stdout.
log_info()    { printf '%b\n' "${LIGHT_GREY}[i] $1${NC}"; }
log_success() { printf '%b\n' "${LIGHT_GREEN}[✓] $1${NC}"; }
log_warning() { printf '%b\n' "${YELLOW}[!] $1${NC}"; }
log_error()   { printf '%b\n' "${RED}[x] $1${NC}" >&2; }
log_step()    { printf '%b\n' "${LIGHT_BLUE}[i] $1${NC}"; }
# Abort the script: report the message through log_error, then exit non-zero.
die() { log_error "$1"; exit 1; }
# Predicate: can the given name be resolved to something runnable?
# (command -v also finds shell builtins and functions, not just binaries.)
command_exists() {
    if command -v "$1" >/dev/null 2>&1; then
        return 0
    fi
    return 1
}
# Get directory size in human readable format.
# Prints "0B" when the directory is missing (or, under pipefail, when du fails).
# NOTE: lines were fused with old-version diff fragments; reconstructed here.
get_dir_size() {
    local dir="$1"
    if [[ -d "$dir" ]]; then
        # Errors from du (e.g. permission denied) are suppressed; the || echo
        # keeps the function's output well-formed when the pipeline fails.
        du -sh "$dir" 2>/dev/null | cut -f1 || echo "0B"
    else
        echo "0B"
    fi
}
#################### # Safe directory cleanup with size reporting
# Empty a directory's contents in place, logging how much space was freed.
# $1 - directory to clean (the directory itself is preserved)
# $2 - human-readable label used in the log output
clean_directory() {
    local dir="$1"
    local description="$2"
    # Nothing to do for a missing directory.
    if [[ ! -d "$dir" ]]; then
        return 0
    fi
    local size_before
    size_before=$(get_dir_size "$dir")
    # get_dir_size reports "0B" for missing/unreadable dirs; skip the deletion pass.
    if [[ "$size_before" == "0B" ]]; then
        log_info "$description: already clean"
        return 0
    fi
    log_step "$description (was $size_before)..."
    # Use find with -delete for safer cleanup (no glob expansion, includes dotfiles)
    if find "$dir" -mindepth 1 -delete 2>/dev/null; then
        log_success "$description: freed $size_before"
    else
        # Fallback to rm if find fails
        # NOTE(review): "$dir"/* skips dotfiles and may hit ARG_MAX on huge
        # directories — acceptable for a best-effort fallback path.
        if rm -rf "$dir"/* 2>/dev/null; then
            log_success "$description: freed $size_before"
        else
            log_warning "$description: partial cleanup completed"
        fi
    fi
}
# Build a one-line, comma-separated summary of memory and disk availability
# for status reporting. Prints an empty line when neither source is readable.
# NOTE: the function header was fused with old-version diff fragments;
# reconstructed here.
get_system_info() {
    local info=""

    # Memory info (Linux-only: /proc/meminfo reports values in kB)
    if [[ -f /proc/meminfo ]]; then
        local mem_total mem_available
        mem_total=$(grep MemTotal /proc/meminfo | awk '{print $2}')
        mem_available=$(grep MemAvailable /proc/meminfo | awk '{print $2}')
        if [[ -n "$mem_total" && -n "$mem_available" ]]; then
            info+="Memory: $((mem_available/1024))MB available of $((mem_total/1024))MB total"
        fi
    fi

    # Disk space info for the root filesystem
    if command_exists df; then
        local disk_info
        disk_info=$(df -h / 2>/dev/null | tail -1 | awk '{print $4 " available of " $2 " total"}')
        if [[ -n "$disk_info" ]]; then
            # ${info:+, } inserts a separator only when memory info is present
            info+="${info:+, }Disk: $disk_info"
        fi
    fi

    echo "$info"
}
# Clean APK cache from Alpine or apt for Debian #==============================================================================
if [ -x "$(command -v apk)" ]; then # AUTO-UPDATE FUNCTIONALITY
echo "Cleaning APK cache..." #==============================================================================
rm -rf /var/cache/apk/*
apk cache clean
apk update
fi
# Check if the update server answers a HEAD request with an HTTP 2xx/3xx status.
# FIX: the previous pattern "HTTP/[12] [23].." could never match an
# "HTTP/1.1 200" status line (the ".1" broke the mandatory space), so
# connectivity checks failed on HTTP/1.1 servers. The version token is now
# matched as any run of digits and dots.
check_server_connectivity() {
    local url="$1"
    curl -s --head "$url" | head -n 1 | grep -E "HTTP/[0-9.]+ [23][0-9][0-9]" >/dev/null 2>&1
}

# Get SHA256 hash of a local file; prints "" when the file is unreadable.
get_file_hash() {
    local file="$1"
    sha256sum "$file" 2>/dev/null | awk '{print $1}' || echo ""
}

# Get SHA256 hash of the content served at a URL; prints "" on download failure.
get_url_hash() {
    local url="$1"
    curl -s "$url" 2>/dev/null | sha256sum | awk '{print $1}' || echo ""
}
# Self-update: replace this script with the server copy when the SHA256
# hashes differ, then exec the new version with the original arguments.
# Degrades gracefully: connectivity or hashing problems log a warning and
# let the current version continue.
# FIX: the timestamped backup is now removed after a successful download;
# previously every update left a clean.sh.backup.<epoch> file behind.
perform_self_update() {
    if [[ "$AUTO_UPDATE_ENABLED" != "true" ]]; then
        log_info "Auto-update is disabled"
        return 0
    fi

    local server_url="$SERVER_BASE_URL/$SCRIPT_NAME"
    log_step "Checking for script updates..."

    # Check if the server file is accessible
    if ! check_server_connectivity "$server_url"; then
        log_warning "Cannot connect to update server, continuing with current version"
        return 0
    fi

    # Compare local and server file hashes
    local local_hash server_hash
    local_hash=$(get_file_hash "$SCRIPT_NAME")
    server_hash=$(get_url_hash "$server_url")

    if [[ -z "$local_hash" || -z "$server_hash" ]]; then
        log_warning "Cannot determine file hashes, skipping update"
        return 0
    fi

    if [[ "$local_hash" != "$server_hash" ]]; then
        log_info "Update available, downloading new version..."

        # Keep a timestamped backup so a failed download can be rolled back.
        local backup_file="${SCRIPT_NAME}.backup.$(date +%s)"
        cp "$SCRIPT_NAME" "$backup_file" || die "Failed to create backup"

        if curl -s -o "$SCRIPT_NAME" "$server_url"; then
            chmod +x "$SCRIPT_NAME" || die "Failed to set executable permissions"
            rm -f "$backup_file"  # success: don't let backups accumulate
            log_success "Script updated successfully"
            log_step "Running updated script..."
            exec ./"$SCRIPT_NAME" "$@"
        else
            # Restore backup on failure
            mv "$backup_file" "$SCRIPT_NAME"
            die "Failed to download updated script"
        fi
    else
        log_success "Script is already up to date"
    fi
}
echo "Maintenance completed." #==============================================================================
# DOCKER CLEANUP FUNCTIONS
#==============================================================================
# Clean Docker resources
# Prune unused Docker resources: images, stopped containers, volumes and
# networks, followed by a full system prune. Every step is best-effort:
# a failure is logged as a warning and the next step still runs.
cleanup_docker() {
    if ! command_exists docker; then
        log_info "Docker not found, skipping Docker cleanup"
        return 0
    fi
    log_step "Starting Docker cleanup..."
    # Check if Docker daemon is running (the CLI can exist without a daemon)
    if ! docker info >/dev/null 2>&1; then
        log_warning "Docker daemon not running, skipping Docker cleanup"
        return 0
    fi
    # Get initial Docker disk usage (only consulted for the final message)
    local docker_usage_before=""
    if docker system df >/dev/null 2>&1; then
        docker_usage_before=$(docker system df 2>/dev/null || echo "")
    fi
    # Remove unused images (-a: all unreferenced images, -f: no prompt)
    log_info "Removing unused Docker images..."
    if docker image prune -af >/dev/null 2>&1; then
        log_success "Docker images cleaned"
    else
        log_warning "Docker image cleanup failed"
    fi
    # Remove stopped containers
    log_info "Removing stopped Docker containers..."
    if docker container prune -f >/dev/null 2>&1; then
        log_success "Docker containers cleaned"
    else
        log_warning "Docker container cleanup failed"
    fi
    # Remove unused volumes
    log_info "Removing unused Docker volumes..."
    if docker volume prune -f >/dev/null 2>&1; then
        log_success "Docker volumes cleaned"
    else
        log_warning "Docker volume cleanup failed"
    fi
    # Remove unused networks
    log_info "Removing unused Docker networks..."
    if docker network prune -f >/dev/null 2>&1; then
        log_success "Docker networks cleaned"
    else
        log_warning "Docker network cleanup failed"
    fi
    # Complete system cleanup (covers anything the individual prunes missed)
    log_info "Running Docker system cleanup..."
    if docker system prune -af >/dev/null 2>&1; then
        log_success "Docker system cleanup completed"
    else
        log_warning "Docker system cleanup failed"
    fi
    # Show space freed if possible
    # NOTE(review): docker_usage_before is captured but never diffed against
    # post-cleanup usage, so no actual savings figure is reported here.
    if [[ -n "$docker_usage_before" ]] && docker system df >/dev/null 2>&1; then
        log_info "Docker cleanup completed"
    fi
}
#==============================================================================
# PACKAGE MANAGER CLEANUP FUNCTIONS
#==============================================================================
# Clean the APK package cache and refresh the index (Alpine Linux).
# Silently no-ops on systems without apk.
cleanup_apk() {
    command_exists apk || return 0

    log_step "Cleaning APK cache..."

    # Wipe the on-disk cache directory first...
    if [[ -d /var/cache/apk ]]; then
        clean_directory "/var/cache/apk" "APK cache directory"
    fi

    # ...then let apk tidy whatever it tracks itself (failure stays silent).
    if apk cache clean >/dev/null 2>&1; then
        log_success "APK cache cleaned"
    fi

    log_info "Updating APK package index..."
    if ! apk update >/dev/null 2>&1; then
        log_warning "APK index update failed"
    else
        log_success "APK index updated"
    fi
}
# Clean APT caches, remove orphaned packages, and refresh the package index
# (Debian/Ubuntu). Silently no-ops on systems without apt-get.
cleanup_apt() {
    command_exists apt-get || return 0

    log_step "Cleaning APT cache..."

    # Drop downloaded .deb archives.
    if ! apt-get clean >/dev/null 2>&1; then
        log_warning "APT clean failed"
    else
        log_success "APT cache cleaned"
    fi

    # Drop archives that can no longer be downloaded.
    if ! apt-get autoclean >/dev/null 2>&1; then
        log_warning "APT autoclean failed"
    else
        log_success "APT autocleaned"
    fi

    # Remove dependencies that nothing installed requires anymore.
    if ! apt-get autoremove -y >/dev/null 2>&1; then
        log_warning "APT autoremove failed"
    else
        log_success "Unnecessary packages removed"
    fi

    log_info "Updating APT package index..."
    if ! apt-get update >/dev/null 2>&1; then
        log_warning "APT index update failed"
    else
        log_success "APT index updated"
    fi
}
#==============================================================================
# SYSTEM CLEANUP FUNCTIONS
#==============================================================================
# Remove top-level entries older than one day from each configured temp
# directory, keeping the directories themselves in place.
cleanup_temp_dirs() {
    log_step "Cleaning temporary directories..."
    local dir
    for dir in "${TEMP_DIRS[@]}"; do
        [[ -d "$dir" ]] || continue
        # Best-effort deletion; permission errors are swallowed and the
        # trailing || true keeps set -e from aborting the script.
        find "$dir" -mindepth 1 -maxdepth 1 -mtime +1 -exec rm -rf {} + 2>/dev/null || true
        log_success "Cleaned old files in $dir"
    done
}
# Clean the configured cache directories plus a fixed set of well-known
# package-manager and per-user cache locations.
cleanup_cache_dirs() {
    log_step "Cleaning cache directories..."
    for cache_dir in "${CACHE_DIRS[@]}"; do
        if [[ -d "$cache_dir" ]]; then
            clean_directory "$cache_dir" "Cache directory $cache_dir"
        fi
    done
    # Clean additional cache locations
    # (the /home/* entries are glob patterns, expanded by the loop below)
    local additional_caches=(
        "/var/lib/apt/lists"
        "/var/cache/debconf"
        "/root/.npm"
        "/root/.pip"
        "/home/*/.cache"
        "/home/*/.npm"
        "/home/*/.pip"
    )
    for cache_pattern in "${additional_caches[@]}"; do
        # Use shell expansion for patterns: unmatched patterns stay literal
        # and fail the -d test; paths containing whitespace would word-split
        # here, which is acceptable for these fixed roots.
        for cache_path in $cache_pattern; do
            if [[ -d "$cache_path" ]]; then
                clean_directory "$cache_path" "Additional cache $cache_path"
            fi
        done 2>/dev/null || true
    done
}
# Delete log files older than LOG_RETENTION_DAYS (plain and compressed),
# then truncate any still-active *.log larger than 100MB so running writers
# keep their file handles.
# FIX: the original used `(( cleaned_count++ ))`, whose post-increment
# returns status 1 when the old value is 0 — under `set -e` the script
# aborted right after deleting the first log file. Plain POSIX arithmetic
# assignment avoids that trap. The unused `local large_logs` was removed.
cleanup_logs() {
    log_step "Cleaning old log files..."

    local logfile

    if [[ -d /var/log ]]; then
        local cleaned_count=0

        # Remove old plain-text logs.
        while IFS= read -r -d '' logfile; do
            if rm -f "$logfile" 2>/dev/null; then
                cleaned_count=$((cleaned_count + 1))
            fi
        done < <(find /var/log -type f -name "*.log" -mtime +"$LOG_RETENTION_DAYS" -print0 2>/dev/null || true)

        # Remove old compressed/rotated logs.
        while IFS= read -r -d '' logfile; do
            if rm -f "$logfile" 2>/dev/null; then
                cleaned_count=$((cleaned_count + 1))
            fi
        done < <(find /var/log -type f \( -name "*.log.gz" -o -name "*.log.bz2" -o -name "*.log.xz" \) -mtime +"$LOG_RETENTION_DAYS" -print0 2>/dev/null || true)

        if [[ $cleaned_count -gt 0 ]]; then
            log_success "Removed $cleaned_count old log files"
        else
            log_info "No old log files to remove"
        fi
    fi

    # Truncate (not delete) oversized active log files.
    while IFS= read -r -d '' logfile; do
        if [[ -f "$logfile" && -w "$logfile" ]]; then
            truncate -s 0 "$logfile" 2>/dev/null || true
        fi
    done < <(find /var/log -type f -name "*.log" -size +100M -print0 2>/dev/null || true)
}
# Vacuum the systemd journal by age, then cap its total size at 100MB.
# Silently no-ops on systems without journalctl.
cleanup_journal() {
    command_exists journalctl || return 0

    log_step "Cleaning systemd journal..."

    # Age-based vacuum first...
    if ! journalctl --vacuum-time="${JOURNAL_RETENTION_DAYS}d" >/dev/null 2>&1; then
        log_warning "Journal cleanup failed"
    else
        log_success "Journal cleaned (older than $JOURNAL_RETENTION_DAYS days)"
    fi

    # ...then size-based (failure here is intentionally silent).
    if journalctl --vacuum-size=100M >/dev/null 2>&1; then
        log_success "Journal size limited to 100MB"
    fi
}
# Remove thumbnail caches for root and for every user home directory.
cleanup_thumbnails() {
    log_step "Cleaning thumbnail caches..."
    local patterns=(
        "/root/.thumbnails"
        "/root/.cache/thumbnails"
        "/home/*/.thumbnails"
        "/home/*/.cache/thumbnails"
    )
    local pattern candidate
    for pattern in "${patterns[@]}"; do
        # Unquoted expansion globs the /home/* entries; an unmatched pattern
        # stays literal and fails the directory test below.
        for candidate in $pattern; do
            [[ -d "$candidate" ]] || continue
            clean_directory "$candidate" "Thumbnail cache $candidate"
        done 2>/dev/null || true
    done
}
# Flush dirty pages to disk, then ask the kernel to drop its caches.
optimize_memory() {
    log_step "Optimizing memory caches..."

    # Flush pending writes before dropping caches.
    if sync; then
        log_info "Filesystem synced"
    fi

    # Writing "3" frees the page cache plus dentries and inodes; the file is
    # only writable by root, so the whole step is skipped otherwise.
    if [[ -w /proc/sys/vm/drop_caches ]]; then
        if echo 3 > /proc/sys/vm/drop_caches 2>/dev/null; then
            log_success "Memory caches dropped"
        else
            log_warning "Failed to drop memory caches"
        fi
    fi
}
#==============================================================================
# REPORTING FUNCTIONS
#==============================================================================
# Print a post-cleanup report: overall system status plus filesystem usage
# for a handful of important directories.
generate_summary() {
    log_step "Generating cleanup summary..."

    local status
    status=$(get_system_info)
    if [[ -n "$status" ]]; then
        log_info "System status: $status"
    fi

    local dir usage
    for dir in "/" "/var" "/tmp" "/var/log" "/var/cache"; do
        [[ -d "$dir" ]] || continue
        usage=$(df -h "$dir" 2>/dev/null | tail -1 | awk '{print $5 " used (" $4 " available)"}' || echo "unknown")
        log_info "$dir: $usage"
    done
}
#==============================================================================
# MAIN EXECUTION
#==============================================================================
# Orchestrate the full maintenance run. Order matters: the self-updater runs
# first (it may exec a newer copy of this script and never return), followed
# by Docker, package-manager and filesystem cleanup, then memory optimization
# and the final summary.
main() {
    log_step "Starting System Cleanup and Maintenance"
    echo
    # Show initial system status
    local initial_info
    initial_info=$(get_system_info)
    if [[ -n "$initial_info" ]]; then
        log_info "Initial system status: $initial_info"
        echo
    fi
    # Perform self-update if enabled (may exec the new version and not return)
    perform_self_update "$@"
    # Docker cleanup
    cleanup_docker
    # Package manager cleanup
    cleanup_apk
    cleanup_apt
    # System cleanup
    cleanup_temp_dirs
    cleanup_cache_dirs
    cleanup_logs
    cleanup_journal
    cleanup_thumbnails
    # Memory optimization
    optimize_memory
    # Generate summary
    echo
    generate_summary
    echo
    log_success "System cleanup and maintenance completed!"
}

# Execute main function with all arguments
main "$@"

View File

@@ -1,106 +1,338 @@
#!/bin/bash #!/bin/bash
NC='\033[0m' # Docker Container Updater
LIGHT_GREEN='\033[1;32m' #
LIGHT_BLUE='\033[1;34m' # Description: Automatically updates Docker containers and manages Docker images
LIGHT_GREEN='\033[1;32m' # Features:
LIGHT_GREY='\033[0;37m' # - Self-updating capability
# - Updates all Docker Compose projects in /root/docker
# - Skips containers with .ignore file
# - Removes obsolete Docker Compose version attributes
# - Cleans up unused Docker images
# Author: ivanch
# Version: 2.0
### AUTO-UPDATER ### set -euo pipefail # Exit on error, undefined vars, and pipe failures
FILE_NAME="docker-updater.sh"
SERVER_FILE="https://git.ivanch.me/ivanch/server-scripts/raw/branch/main/$FILE_NAME"
SERVER_OK=1
# Check if the server file exists #==============================================================================
curl -s --head $SERVER_FILE | head -n 1 | grep -E "HTTP/[12] [23].." > /dev/null # CONFIGURATION
#==============================================================================
if [ $? -ne 0 ]; then # Color definitions for output formatting
echo -e "${RED}[x] Error: $SERVER_FILE not found.${NC}" >&2 readonly NC='\033[0m'
SERVER_OK=0 readonly RED='\033[1;31m'
fi readonly GREEN='\033[1;32m'
readonly LIGHT_GREEN='\033[1;32m'
readonly LIGHT_BLUE='\033[1;34m'
readonly LIGHT_GREY='\033[0;37m'
readonly YELLOW='\033[1;33m'
if [ $SERVER_OK -eq 1 ]; then # Script configuration
echo -e "${LIGHT_BLUE}[i] Running auto-update" readonly SCRIPT_NAME="docker-updater.sh"
readonly SERVER_BASE_URL="https://git.ivanch.me/ivanch/server-scripts/raw/branch/main"
readonly DOCKER_FOLDER="/root/docker"
readonly COMPOSE_FILES=("docker-compose.yml" "docker-compose.yaml" "compose.yaml" "compose.yml")
# Compare the local and server files sha256sum to check if an update is needed # Auto-update configuration
LOCAL_SHA256=$(sha256sum $FILE_NAME | awk '{print $1}') readonly AUTO_UPDATE_ENABLED=true
SERVER_SHA256=$(curl -s $SERVER_FILE | sha256sum | awk '{print $1}')
if [ "$LOCAL_SHA256" != "$SERVER_SHA256" ]; then #==============================================================================
echo -e "${LIGHT_GREY}[i] Updating $FILE_NAME${NC}" # UTILITY FUNCTIONS
#==============================================================================
curl -s -o $FILE_NAME $SERVER_FILE # Print formatted log messages
chmod +x $FILE_NAME log_info() { echo -e "${LIGHT_GREY}[i] $1${NC}"; }
echo -e "${LIGHT_GREEN}[i] $FILE_NAME updated.${NC}" log_success() { echo -e "${LIGHT_GREEN}[] $1${NC}"; }
log_warning() { echo -e "${YELLOW}[!] $1${NC}"; }
log_error() { echo -e "${RED}[x] $1${NC}" >&2; }
log_step() { echo -e "${LIGHT_BLUE}[i] $1${NC}"; }
log_container() { echo -e "${LIGHT_BLUE}[$1] $2${NC}"; }
echo -e "${LIGHT_BLUE}[i] Running updated $FILE_NAME...${NC}" # Exit with error message
./$FILE_NAME die() {
exit 0 log_error "$1"
else
echo -e "${LIGHT_GREEN}[i] $FILE_NAME is already up to date.${NC}"
fi
fi
####################
# Navigate to docker folder
DOCKER_FOLDER=/root/docker
if [ -d "$DOCKER_FOLDER" ]; then
cd $DOCKER_FOLDER
else
echo -e "${LIGHT_GREY}[i] Docker folder not found.${NC}"
exit 1 exit 1
fi }
# Check if a command exists
command_exists() {
    command -v "$1" >/dev/null 2>&1
}

# Verify Docker and the compose plugin are usable before doing anything else.
# Dies (exit 1) when either requirement is missing.
# NOTE: this span was fused with old-version diff fragments; reconstructed.
check_docker_requirements() {
    log_info "Checking Docker requirements..."

    if ! command_exists docker; then
        die "Docker is not installed or not in PATH"
    fi

    if ! docker compose version >/dev/null 2>&1; then
        die "Docker Compose is not available"
    fi

    log_success "Docker requirements satisfied"
}
# Get SHA256 hash of a local file; prints "" when the file is unreadable.
get_file_hash() {
    local file="$1"
    sha256sum "$file" 2>/dev/null | awk '{print $1}' || echo ""
}

# Get SHA256 hash of the content served at a URL; prints "" on download failure.
get_url_hash() {
    local url="$1"
    curl -s "$url" 2>/dev/null | sha256sum | awk '{print $1}' || echo ""
}

# Check if the server answers a HEAD request with an HTTP 2xx/3xx status.
# FIX: "HTTP/[12] [23].." could never match an "HTTP/1.1 200" status line
# (the ".1" broke the mandatory space); the version token is now matched as
# any run of digits and dots.
check_server_connectivity() {
    local url="$1"
    curl -s --head "$url" | head -n 1 | grep -E "HTTP/[0-9.]+ [23][0-9][0-9]" >/dev/null 2>&1
}
#==============================================================================
# AUTO-UPDATE FUNCTIONALITY
#==============================================================================
# Self-update: replace this script with the server copy when the SHA256
# hashes differ, then exec the new version with the original arguments.
# Connectivity or hashing problems degrade to a warning and let the current
# version continue.
# FIX: the timestamped backup is now removed after a successful download;
# previously every update left a *.backup.<epoch> file behind.
perform_self_update() {
    if [[ "$AUTO_UPDATE_ENABLED" != "true" ]]; then
        log_info "Auto-update is disabled"
        return 0
    fi

    local server_url="$SERVER_BASE_URL/$SCRIPT_NAME"
    log_step "Checking for script updates..."

    # Check if the server file is accessible
    if ! check_server_connectivity "$server_url"; then
        log_warning "Cannot connect to update server, continuing with current version"
        return 0
    fi

    # Compare local and server file hashes
    local local_hash
    local server_hash
    local_hash=$(get_file_hash "$SCRIPT_NAME")
    server_hash=$(get_url_hash "$server_url")

    if [[ -z "$local_hash" || -z "$server_hash" ]]; then
        log_warning "Cannot determine file hashes, skipping update"
        return 0
    fi

    if [[ "$local_hash" != "$server_hash" ]]; then
        log_info "Update available, downloading new version..."

        # Keep a timestamped backup so a failed download can be rolled back.
        local backup_file="${SCRIPT_NAME}.backup.$(date +%s)"
        cp "$SCRIPT_NAME" "$backup_file" || die "Failed to create backup"

        if curl -s -o "$SCRIPT_NAME" "$server_url"; then
            chmod +x "$SCRIPT_NAME" || die "Failed to set executable permissions"
            rm -f "$backup_file"  # success: don't let backups accumulate
            log_success "Script updated successfully"
            log_step "Running updated script..."
            exec ./"$SCRIPT_NAME" "$@"
        else
            # Restore backup on failure
            mv "$backup_file" "$SCRIPT_NAME"
            die "Failed to download updated script"
        fi
    else
        log_success "Script is already up to date"
    fi
}
#==============================================================================
# DOCKER COMPOSE MANAGEMENT
#==============================================================================
# Print the first Docker Compose file present in the current directory.
# Returns 0 and echoes the filename, or returns 1 when none exists.
# NOTE: fused "fi fi"/"done done" diff residue removed during reconstruction.
find_compose_file() {
    local compose_file
    for compose_file in "${COMPOSE_FILES[@]}"; do
        if [[ -f "$compose_file" ]]; then
            echo "$compose_file"
            return 0
        fi
    done
    return 1
}
# Remove the obsolete top-level "version:" attribute from every compose file
# present in the current directory (newer Docker Compose warns about it).
# $1 - project name used as the log prefix
# NOTE: the function header was fused with old-version diff fragments;
# reconstructed here.
clean_compose_files() {
    local container_name="$1"
    for compose_file in "${COMPOSE_FILES[@]}"; do
        if [[ -f "$compose_file" ]]; then
            log_container "$container_name" "Cleaning obsolete version attribute from $compose_file"
            sed -i '/^version:/d' "$compose_file" || log_warning "Failed to clean $compose_file"
        fi
    done
}
# A project opts out of updates by containing a ".ignore" marker file in its
# directory (checked relative to the current working directory).
should_skip_container() {
    [[ -f ".ignore" ]]
}

# True when `docker compose ps -q` lists at least one container ID for the
# project in the current working directory.
has_running_containers() {
    local running_containers
    running_containers=$(docker compose ps -q 2>/dev/null || echo "")
    [[ -n "$running_containers" ]]
}
# Update a single Docker Compose project: stop it, pull newer images, restart.
# $1 - path to the project directory
# Returns 0 on success or a benign skip, 1 on failure.
# NOTE: deliberately leaves the working directory changed to $1 — the caller
# probes for .ignore there afterwards and cds back itself.
update_docker_project() {
    local project_dir="$1"
    local container_name
    container_name=$(basename "$project_dir")
    log_container "$container_name" "Checking for updates..."
    # Change to project directory
    cd "$project_dir" || {
        log_error "Cannot access directory: $project_dir"
        return 1
    }
    # Check if container should be skipped (opt-out marker file)
    if should_skip_container; then
        log_container "$container_name" "Skipping (found .ignore file)"
        return 0
    fi
    # Verify a compose file exists before attempting any docker compose call
    local compose_file
    if ! compose_file=$(find_compose_file); then
        log_container "$container_name" "No Docker Compose file found, skipping"
        return 0
    fi
    # Strip the obsolete "version:" attribute from all compose files
    clean_compose_files "$container_name"
    # Only update projects that are currently running
    if ! has_running_containers; then
        log_container "$container_name" "No running containers, skipping update"
        return 0
    fi
    # Stop containers
    if ! docker compose down >/dev/null 2>&1; then
        log_error "Failed to stop containers in $container_name"
        return 1
    fi
    # Pull updated images; a pull failure is non-fatal because the existing
    # images can still be restarted below
    log_container "$container_name" "Pulling updated images..."
    if ! docker compose pull -q >/dev/null 2>&1; then
        log_warning "Failed to pull images for $container_name, attempting to restart anyway"
    fi
    # Start containers
    log_container "$container_name" "Starting containers..."
    if ! docker compose up -d >/dev/null 2>&1; then
        log_error "Failed to start containers in $container_name"
        return 1
    fi
    log_container "$container_name" "Update completed successfully!"
    return 0
}
# Iterate every project directory under DOCKER_FOLDER, update each one, and
# report a summary of updated / skipped / failed projects.
# FIX: counters now use POSIX arithmetic assignment — the original
# `(( count++ ))` returns status 1 when the old value is 0, which aborts the
# whole script under `set -e` after the first counted project.
# NOTE: the function header was fused with old-version diff fragments;
# reconstructed here.
update_all_docker_projects() {
    log_step "Starting Docker container updates..."

    if [[ ! -d "$DOCKER_FOLDER" ]]; then
        die "Docker folder not found: $DOCKER_FOLDER"
    fi

    cd "$DOCKER_FOLDER" || die "Cannot access Docker folder: $DOCKER_FOLDER"

    local updated_count=0
    local failed_count=0
    local skipped_count=0

    for project_dir in */; do
        if [[ -d "$project_dir" ]]; then
            local project_path="$DOCKER_FOLDER/$project_dir"
            if update_docker_project "$project_path"; then
                # update_docker_project leaves the cwd inside the project, so
                # the .ignore probe distinguishes "skipped" from "updated".
                if should_skip_container; then
                    skipped_count=$((skipped_count + 1))
                else
                    updated_count=$((updated_count + 1))
                fi
            else
                failed_count=$((failed_count + 1))
            fi
            # Return to Docker folder for the next iteration
            cd "$DOCKER_FOLDER" || die "Cannot return to Docker folder"
        fi
    done

    # Report results
    log_success "Docker update summary:"
    log_info "  Updated: $updated_count projects"
    log_info "  Skipped: $skipped_count projects"
    if [[ $failed_count -gt 0 ]]; then
        log_warning "  Failed: $failed_count projects"
    fi
}
#==============================================================================
# DOCKER CLEANUP
#==============================================================================

# Remove unused Docker images left behind by the updates (best-effort).
# NOTE: this span was fused with old-version diff fragments; reconstructed.
cleanup_docker_resources() {
    log_step "Cleaning up unused Docker resources..."

    log_info "Removing unused Docker images..."
    if docker image prune -af >/dev/null 2>&1; then
        log_success "Docker image cleanup completed"
    else
        log_warning "Docker image cleanup failed"
    fi
}
#==============================================================================
# MAIN EXECUTION
#==============================================================================

# Orchestrate the run: verify prerequisites, self-update (may exec a newer
# copy and never return), update every compose project, then prune images.
# NOTE: this span was fused with old-version diff fragments; reconstructed.
main() {
    log_step "Starting Docker Container Updater"
    echo

    check_docker_requirements
    perform_self_update "$@"
    update_all_docker_projects
    cleanup_docker_resources

    echo
    log_success "Docker container update process completed!"
}

# Execute main function with all arguments
main "$@"

View File

@@ -1,193 +1,292 @@
#!/bin/bash #!/bin/bash
# Usage: # Usage: curl -sSL https://git.ivanch.me/ivanch/server-scripts/raw/branch/main/scripts-download.sh | bash
## curl -sSL https://git.ivanch.me/ivanch/server-scripts/raw/branch/main/scripts-download.sh | bash
# colors set -euo pipefail
RED='\033[1;31m'
GREEN='\033[1;32m'
NC='\033[0m'
LIGHT_BLUE='\033[1;34m'
LIGHT_RED='\033[1;31m'
LIGHT_GREEN='\033[1;32m'
GREY='\033[1;30m'
YELLOW='\033[1;33m'
FILES_URL="https://git.ivanch.me/ivanch/server-scripts/raw/branch/main" #==============================================================================
# CONFIGURATION
#==============================================================================
echo -e "\r${LIGHT_BLUE}[i] Running scripts-download.sh" # Color definitions for output formatting
readonly RED='\033[1;31m'
readonly GREEN='\033[1;32m'
readonly NC='\033[0m'
readonly LIGHT_BLUE='\033[1;34m'
readonly LIGHT_RED='\033[1;31m'
readonly LIGHT_GREEN='\033[1;32m'
readonly GREY='\033[1;30m'
readonly YELLOW='\033[1;33m'
# Detect OS (Debian or Alpine) # Configuration
echo -e "${GREY}[i] Detecting OS..." readonly FILES_URL="https://git.ivanch.me/ivanch/server-scripts/raw/branch/main"
readonly REQUIRED_PACKAGES=("zip" "unzip" "sha256sum" "curl" "crontab")
readonly AVAILABLE_SCRIPTS=("clean.sh" "backup.sh" "docker-updater.sh")
DETECTED="" # Format: [script_name]="cron_schedule"
declare -A CRONTAB_SCHEDULES=(
["clean.sh"]="0 23 * * *" # Daily at 11 PM
["backup.sh"]="30 23 * * 1,5" # Monday and Friday at 11:30 PM
["docker-updater.sh"]="0 3 */4 * *" # Every 4 days at 3 AM
)
if [ -x "$(command -v apk)" ]; then #==============================================================================
DETECTED="Alpine" # UTILITY FUNCTIONS
fi #==============================================================================
if [ -x "$(command -v apt)" ]; then # Print formatted log messages
DETECTED="Debian" log_info() { echo -e "${GREY}[i] $1${NC}"; }
fi log_success() { echo -e "${GREEN}[✓] $1${NC}"; }
log_warning() { echo -e "${YELLOW}[!] $1${NC}"; }
log_error() { echo -e "${RED}[x] $1${NC}" >&2; }
log_step() { echo -e "${LIGHT_BLUE}[i] $1${NC}"; }
if [ -z "$DETECTED" ]; then # Exit with error message
echo -e "${RED}[x] Error: OS not supported.${NC}" >&2 die() {
log_error "$1"
exit 1 exit 1
fi
echo -e "${GREEN}[✓] Detected '$DETECTED' Linux.${NC}"
echo -e "${GREY}[i] Checking if required packages are installed..."
PACKAGES=("zip" "unzip" "sha256sum" "curl" "crontab")
NOT_INSLALLED=()
detect_packages() {
for PACKAGE in "${PACKAGES[@]}"; do
if ! [ -x "$(command -v $PACKAGE)" ]; then
echo -e "${YELLOW}[!] Error: $PACKAGE is not installed, will attempt to install later.${NC}" >&2
NOT_INSLALLED+=($PACKAGE)
fi
done
} }
detect_packages # Check if a command exists
# Return success (0) when the given command name resolves on PATH.
command_exists() {
    if command -v "$1" >/dev/null 2>&1; then
        return 0
    else
        return 1
    fi
}
if [ ${#NOT_INSLALLED[@]} -ne 0 ]; then # Check if a process is running
if [ "$DETECTED" == "Alpine" ]; then process_running() {
echo -e "${GREY}[i] Installing required packages using APK...${NC}" pgrep "$1" >/dev/null 2>&1
echo -e "${GREY}[i] Updating APK...${NC}" }
apk update >/dev/null
echo -e "${GREY}[i] Installing packages...${NC}"
apk add --no-cache ${NOT_INSLALLED[@]} >/dev/null
if [ $? -ne 0 ]; then #==============================================================================
echo -e "${RED}[x] Error: Failed to install required packages.${NC}" >&2 # MAIN FUNCTIONS
exit 1 #==============================================================================
else
echo -e "${GREEN}[✓] All required packages should be installed.${NC}" # Detect the operating system
# Detect the operating system.
# Outputs: exactly "Alpine" or "Debian" on stdout (callers capture it with
# detected_os=$(detect_operating_system), so stdout must stay clean).
# Exits via die() on unsupported systems.
detect_operating_system() {
    # BUG FIX: log_info writes to stdout; without the >&2 redirect the log
    # line is captured by the caller's command substitution and corrupts
    # the detected OS string.
    log_info "Detecting operating system..." >&2

    if command_exists apk; then
        echo "Alpine"
    elif command_exists apt; then
        echo "Debian"
    else
        die "Unsupported operating system. This script supports Alpine and Debian-based systems only."
    fi
}
# Check for missing packages
# Check for missing packages.
# Outputs: one missing package name per line on stdout; prints nothing at
# all (not even a blank line) when every required package is present, so
# readarray callers end up with an empty array.
get_missing_packages() {
    local missing=()
    local package
    for package in "${REQUIRED_PACKAGES[@]}"; do
        if ! command_exists "$package"; then
            missing+=("$package")
        fi
    done

    # BUG FIX: an unconditional 'printf "%s\n" "${missing[@]}"' prints a
    # lone blank line when the array is empty; readarray then yields one
    # empty element and callers treat "" as a missing package.
    if (( ${#missing[@]} > 0 )); then
        printf '%s\n' "${missing[@]}"
    fi
}
echo -e "${GREY}[i] Installing packages...${NC}"
apt-get install -y ${NOT_INSLALLED[@]} >/dev/null
# Install packages based on the detected OS.
# Arguments:
#   $1 - OS name ("Alpine" or "Debian")
#   $@ - remaining args: package names to install (may be empty)
# Returns 0 immediately when no packages are requested; exits via die() on
# any package-manager failure or an unknown OS.
install_packages() {
    local os="$1"
    shift
    local packages=("$@")

    # Nothing to do when no packages were requested.
    if [[ ${#packages[@]} -eq 0 ]]; then
        return 0
    fi

    log_info "Installing required packages: ${packages[*]}"

    case "$os" in
        "Alpine")
            log_info "Updating APK package index..."
            apk update >/dev/null || die "Failed to update APK package index"
            log_info "Installing packages via APK..."
            apk add --no-cache "${packages[@]}" >/dev/null || die "Failed to install packages via APK"
            ;;
        "Debian")
            log_info "Updating APT package index..."
            apt-get update -y >/dev/null || die "Failed to update APT package index"
            log_info "Installing packages via APT..."
            apt-get install -y "${packages[@]}" >/dev/null || die "Failed to install packages via APT"
            ;;
        *)
            die "Unknown operating system: $os"
            ;;
    esac
}
# Verify all required packages are available
# Confirm that every required package is now available; die otherwise.
verify_packages() {
    log_info "Verifying package installation..."

    local -a unresolved
    mapfile -t unresolved < <(get_missing_packages)

    (( ${#unresolved[@]} == 0 )) || die "Failed to install required packages: ${unresolved[*]}"

    log_success "All required packages are available"
}
# Check if crontab service is running
# Ensure the cron daemon is running before any crontab entries are installed.
check_crontab_service() {
    log_info "Checking crontab service status..."

    if process_running "cron"; then
        log_success "Crontab service is running"
    else
        die "Crontab service is not running. Please start the cron service first."
    fi
}
# Prompt the user to choose which scripts to install.
# Outputs: one selected script name per line on stdout.
# BUG FIX: callers capture stdout (readarray < <(select_scripts)), so the
# listing and log messages must go to stderr; previously they were emitted
# on stdout and ended up inside the "selected scripts" array.
select_scripts() {
    local selected=()

    {
        log_info "Available scripts for download and installation:"
        echo
        local script
        for script in "${AVAILABLE_SCRIPTS[@]}"; do
            local schedule="${CRONTAB_SCHEDULES[$script]:-"0 0 * * *"}"
            echo -e "  ${LIGHT_BLUE}$script${NC} - Schedule: ${GREY}$schedule${NC}"
        done
        echo
        log_info "Select scripts to download and install:"
    } >&2

    local choice
    for script in "${AVAILABLE_SCRIPTS[@]}"; do
        # read -p shows the prompt on the terminal; answers come from /dev/tty
        # so the script works even when piped from curl.
        read -p "Install $script? [Y/n]: " choice </dev/tty
        if [[ "$choice" =~ ^[Yy]?$ ]]; then
            selected+=("$script")
        fi
    done

    if [[ ${#selected[@]} -eq 0 ]]; then
        die "No scripts selected. Exiting..."
    fi

    printf '%s\n' "${selected[@]}"
}
NOT_INSLALLED=() # Verify server connectivity for selected scripts
# Verify that every selected script exists on the download server.
# Arguments: list of script filenames.
# Dies on the first script whose URL does not answer with an HTTP 2xx/3xx
# status line.
verify_server_connectivity() {
    local scripts=("$@")

    log_info "Verifying server connectivity..."

    local script
    for script in "${scripts[@]}"; do
        local url="$FILES_URL/$script"
        # BUG FIX: the old pattern "HTTP/[12] [23].." required a space right
        # after the major version and therefore never matched "HTTP/1.1 200"
        # status lines; accept an optional ".0"/".1" minor version too.
        if ! curl -s --head "$url" | head -n 1 | grep -E "HTTP/[12](\.[01])? [23].." >/dev/null; then
            die "Script '$script' not found on server: $url"
        fi
    done

    log_success "Server connectivity verified"
}
if ! pgrep "cron" > /dev/null; then
echo -e "${RED}[x] Error: Crontab is not running.${NC}" >&2
exit 1
fi
echo -e "${GREEN}[✓] Crontab is running.${NC}"
# Variables
FILES=("clean.sh" "backup.sh" "docker-updater.sh")
# Prompt user to select files to download
selected_files=()
echo -e "${GREY}[i] Select files to download and install on crontab:${NC} "
for FILE in "${FILES[@]}"; do
read -p "Do you want to download and install $FILE? [Y/n]: " choice </dev/tty
if [[ "$choice" == "y" || "$choice" == "Y" || -z "$choice" ]]; then
selected_files+=("$FILE")
fi
done
if [ ${#selected_files[@]} -eq 0 ]; then
echo -e "${RED}[x] No files selected. Exiting...${NC}"
exit 1
fi
# Check connection with the server for selected files
echo -e "${GREY}[i] Checking connection with the server..."
for FILE in "${selected_files[@]}"; do
curl -s --head "$FILES_URL/$FILE" | head -n 1 | grep -E "HTTP/[12] [23].." > /dev/null
if [ $? -ne 0 ]; then
echo -e "${RED}[x] Error: $FILE not found on the server.${NC}" >&2
exit 1
fi
done
echo -e "${GREEN}[✓] Connection with the server established.${NC}"
echo -e "${GREY}[i] Downloading scripts..."
# Download selected scripts from the server into the current directory and
# mark them executable.
# Arguments: list of script filenames.  Dies on any download or chmod error.
download_scripts() {
    local scripts=("$@")

    log_info "Downloading ${#scripts[@]} script(s)..."

    local script
    for script in "${scripts[@]}"; do
        local url="$FILES_URL/$script"
        log_step "Downloading $script..."

        if ! curl -s -o "./$script" "$url"; then
            die "Failed to download $script from $url"
        fi

        # Set executable permissions
        chmod +x "./$script" || die "Failed to set executable permissions for $script"
    done

    log_success "All scripts downloaded and configured"
}
echo -e "${GREEN}[✓] Scripts downloaded.${NC}" # Setup crontab entries for selected scripts
# Setup crontab entries for selected scripts.
# Arguments: list of script filenames (resolved against the current dir).
# Each script's schedule comes from CRONTAB_SCHEDULES (default: daily at
# midnight) and its output is redirected to /tmp/<name>.log.
setup_crontab() {
    local scripts=("$@")
    local current_workdir
    current_workdir=$(pwd)

    log_info "Setting up crontab entries..."

    local script
    for script in "${scripts[@]}"; do
        local schedule="${CRONTAB_SCHEDULES[$script]:-"0 0 * * *"}"
        local log_file="/tmp/${script%.*}.log"
        local cron_entry="$schedule $current_workdir/$script > $log_file 2>&1"

        log_step "Configuring crontab for $script (Schedule: $schedule)..."

        # Remove existing crontab entry for this script.
        # BUG FIX: 'grep -v' exits 1 when it filters out every line; the
        # '|| true' keeps that from tripping 'set -o pipefail' when the
        # script's entry was the only one in the crontab.
        if crontab -l 2>/dev/null | grep -q "$script"; then
            log_step "Removing existing crontab entry for $script..."
            crontab -l 2>/dev/null | { grep -v "$script" || true; } | crontab - \
                || die "Failed to remove existing crontab entry"
        fi

        # Add new crontab entry.
        # BUG FIX: 'crontab -l' fails when no crontab exists yet; under
        # 'set -e' that aborted the subshell before the entry was echoed,
        # so first-time installs always failed.  '|| true' makes the listing
        # best-effort.
        { crontab -l 2>/dev/null || true; echo "$cron_entry"; } | crontab - \
            || die "Failed to add crontab entry for $script"

        # Verify the entry was added
        if ! crontab -l 2>/dev/null | grep -q "$script"; then
            die "Failed to verify crontab entry for $script"
        fi

        log_success "Crontab configured for $script"
    done

    log_success "All crontab entries configured successfully"
}
CURRENT_WORKDIR=$(pwd) #==============================================================================
# MAIN EXECUTION
#==============================================================================
# Top-level flow: detect OS, install/verify prerequisites, let the user pick
# scripts, then download them and register their crontab entries.
# NOTE: in the garbled original, "main() {" was fused onto a comment line;
# this restores the function definition.
main() {
    log_step "Starting Server Scripts Downloader"
    echo

    # System detection and validation
    local detected_os
    detected_os=$(detect_operating_system)
    log_success "Detected $detected_os Linux"

    # Package management
    local missing_packages
    readarray -t missing_packages < <(get_missing_packages)
    if [[ ${#missing_packages[@]} -gt 0 ]]; then
        log_warning "Missing packages detected: ${missing_packages[*]}"
        install_packages "$detected_os" "${missing_packages[@]}"
    fi

    verify_packages
    check_crontab_service

    # Script selection and installation
    local selected_scripts
    readarray -t selected_scripts < <(select_scripts)
    log_info "Selected scripts: ${selected_scripts[*]}"

    verify_server_connectivity "${selected_scripts[@]}"
    download_scripts "${selected_scripts[@]}"
    setup_crontab "${selected_scripts[@]}"

    echo
    log_success "Installation completed successfully!"
    log_info "Scripts have been downloaded to: $(pwd)"
    log_info "Crontab entries have been configured. Use 'crontab -l' to view them."
    log_info "Log files will be created in /tmp/ directory."
}
# Execute main function.
# (De-interleaved: the original span fused this call into the superseded
# per-file crontab loop from the previous script version.)
main "$@"