Compare commits: 1d6dae86a7...main (47 commits)

96548f4773, c34ee5185d, 1489062943, eb8ca78f4f, 45567b2242, b0324ac9d8, 06cf78a4a6, cd57837696, cedc435df0, aa7a9b8548, 11e2a28bd4, 8b52bd2c45, 442ff12039, 12920c10d4, 99e110afb3, 8108ca7e7b, 100262513b, 7520d70ce9, 7016ced89e, 928605a696, b75240c693, 9ab518e149, 89f0afe334, 018a4a5d60, cbd813a76e, 5f77376a46, a32bcb34a3, 20ef0eb4b5, 6879e4d2bf, a7fcd1b4fe, ee03aff009, 65efcc4709, 4b323f5833, 7c01b5c8af, b5bbe2628e, 72ec3e2477, df11d9dcf8, 76aaf0180c, 31e94b1f2e, d8f0e0e8ee, bb0f653ac5, 68511a6915, b54fcffb66, f851a95bdf, cc416e5cd9, 78b706223d, 8c0235ebf2
.gitea/workflows/haven-notify.yaml — new file (+92)

@@ -0,0 +1,92 @@
```yaml
name: Haven Notify Build and Deploy

on:
  push:
    branches:
      - main
    paths:
      - 'haven-notify/**'
      - '.gitea/workflows/**'
  workflow_dispatch: {}

env:
  REGISTRY_HOST: git.ivanch.me
  REGISTRY_USERNAME: ivanch
  IMAGE_NOTIFY: ${{ env.REGISTRY_HOST }}/ivanch/haven-notify
  KUBE_CONFIG: ${{ secrets.KUBE_CONFIG }}

jobs:
  build_haven_notify:
    name: Build Haven Notify Image
    runs-on: ubuntu-22.04

    steps:
      - name: Check out repository
        uses: actions/checkout@v2

      - name: Log in to Container Registry
        run: |
          echo "${{ secrets.REGISTRY_PASSWORD }}" \
            | docker login "${{ env.REGISTRY_HOST }}" \
            -u "${{ env.REGISTRY_USERNAME }}" \
            --password-stdin

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Build and Push Multi-Arch Image
        uses: docker/build-push-action@v6
        with:
          push: true
          context: haven-notify
          platforms: linux/amd64,linux/arm64
          tags: |
            ${{ env.IMAGE_NOTIFY }}:latest

  deploy_haven_notify:
    name: Deploy Haven Notify (internal)
    runs-on: ubuntu-amd64
    needs: build_haven_notify
    steps:
      - name: Check KUBE_CONFIG validity
        run: |
          if [ -z "${KUBE_CONFIG}" ] || [ "${KUBE_CONFIG}" = "" ] || [ "${KUBE_CONFIG// }" = "" ]; then
            echo "KUBE_CONFIG is not set or is empty."
            exit 1
          fi

      - name: Check out repository
        uses: actions/checkout@v2

      - name: Download and install dependencies
        run: |
          apt-get update -y
          apt-get install -y curl
          curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
          install -m 0755 kubectl /usr/local/bin/kubectl
          kubectl version --client

      - name: Set up kubeconfig
        run: |
          cd haven-notify/deploy
          echo "$KUBE_CONFIG" > kubeconfig.yaml
        env:
          KUBE_CONFIG: ${{ env.KUBE_CONFIG }}

      - name: Check connection to cluster
        run: |
          cd haven-notify/deploy
          kubectl --kubeconfig=kubeconfig.yaml cluster-info

      - name: Apply haven-notify deployment
        run: |
          cd haven-notify/deploy
          kubectl --kubeconfig=kubeconfig.yaml apply -f haven-notify.yaml

      - name: Rollout restart haven-notify
        run: |
          cd haven-notify/deploy
          kubectl --kubeconfig=kubeconfig.yaml rollout restart deployment/haven-notify
```
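The workflow ends with a `rollout restart` but never waits for the new pods to become healthy. A possible follow-up check (not part of the workflow above, shown here as an illustrative extra step) is:

```sh
# Hypothetical verification step: block until the restarted deployment settles.
cd haven-notify/deploy
kubectl --kubeconfig=kubeconfig.yaml rollout status deployment/haven-notify --timeout=120s
```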
```diff
@@ -17,6 +17,10 @@ When you run the script, you will be prompted to select which scripts you want t
 
 This script is used to backup a directory to a remote server using `rsync`. It is intended to be run as a cron job.
 
+#### `nas-gdrive-backup.sh`
+
+This script is used to both install itself and to run a periodic backup to Google Drive for files that had changed.
+
 ### `clean.sh`
 
 This script is used to clean some of the files, docker dangling images, and docker stopped/unused containers.
```
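`backup.sh` is described above as intended for cron; a typical crontab entry would look like the following (the schedule and install path are illustrative, not taken from the repository):

```sh
# Run the Docker backup nightly at 03:00; path and log file are hypothetical.
0 3 * * * /root/backup.sh >> /var/log/docker-backup.log 2>&1
```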
backup.sh — modified (76 lines changed)

```diff
@@ -1,40 +1,23 @@
 #!/bin/bash
 
-### AUTO-UPDATER ###
-# Variables
-SERVER_FILE="https://git.ivanch.me/ivanch/server-scripts/raw/branch/main/backup.sh"
-SERVER_OK=1
-
-# Check if the server file exists
-curl -s --head $SERVER_FILE | head -n 1 | grep -E "HTTP/[12] [23].." > /dev/null
-
-if [ $? -ne 0 ]; then
-    echo "Error: $SERVER_FILE not found." >&2
-    SERVER_OK=0
-fi
-
-if [ $SERVER_OK -eq 1 ]; then
-    echo "Running auto-update..."
-
-    # Compare the local and server files sha256sum to check if an update is needed
-    LOCAL_SHA256=$(sha256sum backup.sh | awk '{print $1}')
-    SERVER_SHA256=$(curl -s $SERVER_FILE | sha256sum | awk '{print $1}')
-
-    if [ "$LOCAL_SHA256" != "$SERVER_SHA256" ]; then
-        echo "Updating backup.sh..."
-        curl -s -o backup.sh $SERVER_FILE
-        echo "backup.sh updated."
-
-        chmod +x backup.sh
-        echo "Permissions set up."
-
-        echo "Running updated backup.sh..."
-        ./backup.sh
-        exit 0
-    else
-        echo "backup.sh is up to date.."
-    fi
-fi
-
+# Function to send notification
+HOSTNAME=$(cat /etc/hostname)
+NOTIFY_URL_ERROR="http://notify.haven/template/notify/error"
+NOTIFY_URL_BACKUP="http://notify.haven/template/notify/backup"
+send_error_notification() {
+    local message="$1"
+    local critical="$2"
+    curl -s -X POST "$NOTIFY_URL_ERROR" \
+        -H "Content-Type: application/json" \
+        -d "{\"caller\": \"Docker Backup - $HOSTNAME\", \"message\": \"$message\", \"critical\": $critical}"
+}
+send_backup_notification() {
+    local message="$1"
+    local backup_size="$2"
+    curl -s -X POST "$NOTIFY_URL_BACKUP" \
+        -H "Content-Type: application/json" \
+        -d "{\"title\": \"Docker Backup - $HOSTNAME\", \"message\": \"$message\", \"backupSizeInMB\": $backup_size}"
+}
+
 ####################
@@ -46,21 +29,34 @@ REMOTE_HOST="nas.haven"
 REMOTE_DIR="/export/Backup/Docker/$(cat /etc/hostname)"
 
 # Create a compressed backup file
-zip -r $BACKUP_FILE $SOURCE_DIR
+zip -q -r $BACKUP_FILE $SOURCE_DIR || true
+if [ $? -ne 0 ]; then
+    send_error_notification "⚠️ Some files or folders in $SOURCE_DIR could not be backed up (possibly in use or locked). Backup archive created with available files." false
+fi
 
 # Check if remote path exists
-ssh $REMOTE_USER@$REMOTE_HOST "mkdir -p $REMOTE_DIR"
+if ! ssh $REMOTE_USER@$REMOTE_HOST "mkdir -p $REMOTE_DIR"; then
+    send_error_notification "❌ Failed to create remote directory: $REMOTE_DIR on $REMOTE_HOST" true
+    exit 1
+fi
 
 # Transfer the backup file to the remote server
-scp $BACKUP_FILE $REMOTE_USER@$REMOTE_HOST:$REMOTE_DIR
+if ! scp $BACKUP_FILE $REMOTE_USER@$REMOTE_HOST:$REMOTE_DIR; then
+    send_error_notification "❌ Failed to transfer backup file to remote server: $REMOTE_HOST:$REMOTE_DIR" true
+    exit 1
+fi
 
 # Remove the backup file
+BACKUP_SIZE=$(du -m $BACKUP_FILE | cut -f1)
 rm $BACKUP_FILE
 
 # Erase last 7 days backups from remote server
-ssh $REMOTE_USER@$REMOTE_HOST "find $REMOTE_DIR -type f -name 'docker_backup_*' -mtime +7 -exec rm {} \;"
+if ! ssh $REMOTE_USER@$REMOTE_HOST "find $REMOTE_DIR -type f -name 'docker_backup_*' -mtime +7 -exec rm {} \;"; then
+    send_error_notification "⚠️ Failed to clean old backups on remote server: $REMOTE_HOST:$REMOTE_DIR" false
+fi
 
-# Exit
+# Success notification
+send_backup_notification "✅ Backup completed successfully for: $SOURCE_DIR to $REMOTE_HOST:$REMOTE_DIR" $BACKUP_SIZE
 echo "Backup completed successfully"
 exit 0
```
clean.sh — rewritten (74 → 477 lines)

@@ -1,74 +1,477 @@

Removed (previous version):
```sh
#!/bin/bash

### AUTO-UPDATER ###
# Variables
SERVER_FILE="https://git.ivanch.me/ivanch/server-scripts/raw/branch/main/clean.sh"
SERVER_OK=1

# Check if the server file exists
curl -s --head $SERVER_FILE | head -n 1 | grep -E "HTTP/[12] [23].." > /dev/null

if [ $? -ne 0 ]; then
    echo "Error: $SERVER_FILE not found." >&2
    SERVER_OK=0
fi

if [ $SERVER_OK -eq 1 ]; then
    echo "Running auto-update..."

    # Compare the local and server files sha256sum to check if an update is needed
    LOCAL_SHA256=$(sha256sum clean.sh | awk '{print $1}')
    SERVER_SHA256=$(curl -s $SERVER_FILE | sha256sum | awk '{print $1}')

    if [ "$LOCAL_SHA256" != "$SERVER_SHA256" ]; then
        echo "Updating clean.sh..."
        curl -s -o clean.sh $SERVER_FILE
        echo "clean.sh updated."

        chmod +x clean.sh
        echo "Permissions set up."

        echo "Running updated clean.sh..."
        ./clean.sh
        exit 0
    else
        echo "clean.sh is up to date.."
    fi
fi

####################

# Run Docker system prune
echo "Running Docker system prune..."
docker image prune -af
docker system prune -af

# Clean APK cache from Alpine or apt for Debian
if [ -x "$(command -v apk)" ]; then
    echo "Cleaning APK cache..."
    rm -rf /var/cache/apk/*
    apk cache clean
    apk update
fi

if [ -x "$(command -v apt)" ]; then
    echo "Cleaning apt cache..."
    apt-get clean
    apt-get autoclean
    apt-get update
fi

# Clean system caches
echo "Cleaning system caches..."
rm -rf /var/cache/*
rm -rf /tmp/*

# General system maintenance
echo "Performing general system maintenance..."
sync; echo 3 > /proc/sys/vm/drop_caches

# Remove old logs
echo "Removing old logs..."
find /var/log -type f -name "*.log" -mtime +30 -delete

echo "Maintenance completed."
```

Added (version 2.0):
```sh
#!/bin/bash

# System Cleanup and Maintenance Script
#
# Description: Comprehensive system cleanup for Docker containers and Linux systems
# Features:
# - Docker resource cleanup (images, containers, volumes, networks)
# - Package manager cache cleanup (APK/APT)
# - System cache and temporary file cleanup
# - Log rotation and cleanup
# - Memory cache optimization
# - Journal cleanup (systemd)
# - Thumbnail and user cache cleanup
# Author: ivanch
# Version: 2.0

set -euo pipefail  # Exit on error, undefined vars, and pipe failures

#==============================================================================
# CONFIGURATION
#==============================================================================

# Color definitions for output formatting
readonly NC='\033[0m'
readonly RED='\033[1;31m'
readonly GREEN='\033[1;32m'
readonly LIGHT_GREEN='\033[1;32m'
readonly LIGHT_BLUE='\033[1;34m'
readonly LIGHT_GREY='\033[0;37m'
readonly YELLOW='\033[1;33m'

# Cleanup configuration
readonly LOG_RETENTION_DAYS=30
readonly JOURNAL_RETENTION_DAYS=7
readonly TEMP_DIRS=("/tmp" "/var/tmp")
readonly CACHE_DIRS=("/var/cache" "/root/.cache")

# Auto-update configuration
readonly AUTO_UPDATE_ENABLED=true

#==============================================================================
# UTILITY FUNCTIONS
#==============================================================================

# Print formatted log messages
log_info() { echo -e "${LIGHT_GREY}[i] $1${NC}"; }
log_success() { echo -e "${LIGHT_GREEN}[✓] $1${NC}"; }
log_warning() { echo -e "${YELLOW}[!] $1${NC}"; }
log_error() { echo -e "${RED}[x] $1${NC}" >&2; }
log_step() { echo -e "${LIGHT_BLUE}[i] $1${NC}"; }

# Exit with error message
die() {
    log_error "$1"
    exit 1
}

# Check if a command exists
command_exists() {
    command -v "$1" >/dev/null 2>&1
}

# Get directory size in human readable format
get_dir_size() {
    local dir="$1"
    if [[ -d "$dir" ]]; then
        du -sh "$dir" 2>/dev/null | cut -f1 || echo "0B"
    else
        echo "0B"
    fi
}

# Safe directory cleanup with size reporting
clean_directory() {
    local dir="$1"
    local description="$2"

    if [[ ! -d "$dir" ]]; then
        return 0
    fi

    local size_before
    size_before=$(get_dir_size "$dir")

    if [[ "$size_before" == "0B" ]]; then
        log_info "$description: already clean"
        return 0
    fi

    log_step "$description (was $size_before)..."

    # Use find with -delete for safer cleanup
    if find "$dir" -mindepth 1 -delete 2>/dev/null; then
        log_success "$description: freed $size_before"
    else
        # Fallback to rm if find fails
        if rm -rf "$dir"/* 2>/dev/null; then
            log_success "$description: freed $size_before"
        else
            log_warning "$description: partial cleanup completed"
        fi
    fi
}

# Get system information for reporting
get_system_info() {
    local info=""

    # Memory info
    if [[ -f /proc/meminfo ]]; then
        local mem_total mem_available
        mem_total=$(grep MemTotal /proc/meminfo | awk '{print $2}')
        mem_available=$(grep MemAvailable /proc/meminfo | awk '{print $2}')
        if [[ -n "$mem_total" && -n "$mem_available" ]]; then
            info+="Memory: $((mem_available/1024))MB available of $((mem_total/1024))MB total"
        fi
    fi

    # Disk space info
    if command_exists df; then
        local disk_info
        disk_info=$(df -h / 2>/dev/null | tail -1 | awk '{print $4 " available of " $2 " total"}')
        if [[ -n "$disk_info" ]]; then
            info+="${info:+, }Disk: $disk_info"
        fi
    fi

    echo "$info"
}

#==============================================================================
# DOCKER CLEANUP FUNCTIONS
#==============================================================================

# Clean Docker resources
cleanup_docker() {
    if ! command_exists docker; then
        log_info "Docker not found, skipping Docker cleanup"
        return 0
    fi

    log_step "Starting Docker cleanup..."

    # Check if Docker daemon is running
    if ! docker info >/dev/null 2>&1; then
        log_warning "Docker daemon not running, skipping Docker cleanup"
        return 0
    fi

    # Get initial Docker disk usage
    local docker_usage_before=""
    if docker system df >/dev/null 2>&1; then
        docker_usage_before=$(docker system df 2>/dev/null || echo "")
    fi

    # Remove unused images
    log_info "Removing unused Docker images..."
    if docker image prune -af >/dev/null 2>&1; then
        log_success "Docker images cleaned"
    else
        log_warning "Docker image cleanup failed"
    fi

    # Remove stopped containers
    log_info "Removing stopped Docker containers..."
    if docker container prune -f >/dev/null 2>&1; then
        log_success "Docker containers cleaned"
    else
        log_warning "Docker container cleanup failed"
    fi

    # Remove unused volumes
    log_info "Removing unused Docker volumes..."
    if docker volume prune -f >/dev/null 2>&1; then
        log_success "Docker volumes cleaned"
    else
        log_warning "Docker volume cleanup failed"
    fi

    # Remove unused networks
    log_info "Removing unused Docker networks..."
    if docker network prune -f >/dev/null 2>&1; then
        log_success "Docker networks cleaned"
    else
        log_warning "Docker network cleanup failed"
    fi

    # Complete system cleanup
    log_info "Running Docker system cleanup..."
    if docker system prune -af >/dev/null 2>&1; then
        log_success "Docker system cleanup completed"
    else
        log_warning "Docker system cleanup failed"
    fi

    # Show space freed if possible
    if [[ -n "$docker_usage_before" ]] && docker system df >/dev/null 2>&1; then
        log_info "Docker cleanup completed"
    fi
}

#==============================================================================
# PACKAGE MANAGER CLEANUP FUNCTIONS
#==============================================================================

# Clean APK cache (Alpine Linux)
cleanup_apk() {
    if ! command_exists apk; then
        return 0
    fi

    log_step "Cleaning APK cache..."

    # Clean APK cache
    if [[ -d /var/cache/apk ]]; then
        clean_directory "/var/cache/apk" "APK cache directory"
    fi

    # Clean APK cache using apk command
    if apk cache clean >/dev/null 2>&1; then
        log_success "APK cache cleaned"
    fi

    # Update package index
    log_info "Updating APK package index..."
    if apk update >/dev/null 2>&1; then
        log_success "APK index updated"
    else
        log_warning "APK index update failed"
    fi
}

# Clean APT cache (Debian/Ubuntu)
cleanup_apt() {
    if ! command_exists apt-get; then
        return 0
    fi

    log_step "Cleaning APT cache..."

    # Clean downloaded packages
    if apt-get clean >/dev/null 2>&1; then
        log_success "APT cache cleaned"
    else
        log_warning "APT clean failed"
    fi

    # Remove orphaned packages
    if apt-get autoclean >/dev/null 2>&1; then
        log_success "APT autocleaned"
    else
        log_warning "APT autoclean failed"
    fi

    # Remove unnecessary packages
    if apt-get autoremove -y >/dev/null 2>&1; then
        log_success "Unnecessary packages removed"
    else
        log_warning "APT autoremove failed"
    fi

    # Update package index
    log_info "Updating APT package index..."
    if apt-get update >/dev/null 2>&1; then
        log_success "APT index updated"
    else
        log_warning "APT index update failed"
    fi
}

#==============================================================================
# SYSTEM CLEANUP FUNCTIONS
#==============================================================================

# Clean system temporary directories
cleanup_temp_dirs() {
    log_step "Cleaning temporary directories..."

    for temp_dir in "${TEMP_DIRS[@]}"; do
        if [[ -d "$temp_dir" ]]; then
            # Clean contents but preserve the directory
            find "$temp_dir" -mindepth 1 -maxdepth 1 -mtime +1 -exec rm -rf {} + 2>/dev/null || true
            log_success "Cleaned old files in $temp_dir"
        fi
    done
}

# Clean system cache directories
cleanup_cache_dirs() {
    log_step "Cleaning cache directories..."

    for cache_dir in "${CACHE_DIRS[@]}"; do
        if [[ -d "$cache_dir" ]]; then
            clean_directory "$cache_dir" "Cache directory $cache_dir"
        fi
    done

    # Clean additional cache locations
    local additional_caches=(
        "/var/lib/apt/lists"
        "/var/cache/debconf"
        "/root/.npm"
        "/root/.pip"
        "/home/*/.cache"
        "/home/*/.npm"
        "/home/*/.pip"
    )

    for cache_pattern in "${additional_caches[@]}"; do
        # Use shell expansion for patterns
        for cache_path in $cache_pattern; do
            if [[ -d "$cache_path" ]]; then
                clean_directory "$cache_path" "Additional cache $cache_path"
            fi
        done 2>/dev/null || true
    done
}

# Clean old log files
cleanup_logs() {
    log_step "Cleaning old log files..."

    # Clean logs older than retention period
    if [[ -d /var/log ]]; then
        local cleaned_count=0

        # Find and remove old log files
        while IFS= read -r -d '' logfile; do
            rm -f "$logfile" 2>/dev/null && ((cleaned_count++))
        done < <(find /var/log -type f -name "*.log" -mtime +"$LOG_RETENTION_DAYS" -print0 2>/dev/null || true)

        # Clean compressed logs
        while IFS= read -r -d '' logfile; do
            rm -f "$logfile" 2>/dev/null && ((cleaned_count++))
        done < <(find /var/log -type f \( -name "*.log.gz" -o -name "*.log.bz2" -o -name "*.log.xz" \) -mtime +"$LOG_RETENTION_DAYS" -print0 2>/dev/null || true)

        if [[ $cleaned_count -gt 0 ]]; then
            log_success "Removed $cleaned_count old log files"
        else
            log_info "No old log files to remove"
        fi
    fi

    # Truncate large active log files
    local large_logs
    while IFS= read -r -d '' logfile; do
        if [[ -f "$logfile" && -w "$logfile" ]]; then
            truncate -s 0 "$logfile" 2>/dev/null || true
        fi
    done < <(find /var/log -type f -name "*.log" -size +100M -print0 2>/dev/null || true)
}

# Clean systemd journal
cleanup_journal() {
    if ! command_exists journalctl; then
        return 0
    fi

    log_step "Cleaning systemd journal..."

    # Clean journal older than retention period
    if journalctl --vacuum-time="${JOURNAL_RETENTION_DAYS}d" >/dev/null 2>&1; then
        log_success "Journal cleaned (older than $JOURNAL_RETENTION_DAYS days)"
    else
        log_warning "Journal cleanup failed"
    fi

    # Limit journal size
    if journalctl --vacuum-size=100M >/dev/null 2>&1; then
        log_success "Journal size limited to 100MB"
    fi
}

# Clean thumbnail caches
cleanup_thumbnails() {
    log_step "Cleaning thumbnail caches..."

    local thumbnail_dirs=(
        "/root/.thumbnails"
        "/root/.cache/thumbnails"
        "/home/*/.thumbnails"
        "/home/*/.cache/thumbnails"
    )

    for thumb_pattern in "${thumbnail_dirs[@]}"; do
        for thumb_dir in $thumb_pattern; do
            if [[ -d "$thumb_dir" ]]; then
                clean_directory "$thumb_dir" "Thumbnail cache $thumb_dir"
            fi
        done 2>/dev/null || true
    done
}

# Optimize memory caches
optimize_memory() {
    log_step "Optimizing memory caches..."

    # Sync filesystem
    if sync; then
        log_info "Filesystem synced"
    fi

    # Drop caches (page cache, dentries and inodes)
    if [[ -w /proc/sys/vm/drop_caches ]]; then
        echo 3 > /proc/sys/vm/drop_caches 2>/dev/null && log_success "Memory caches dropped" || log_warning "Failed to drop memory caches"
    fi
}

#==============================================================================
# REPORTING FUNCTIONS
#==============================================================================

# Generate cleanup summary
generate_summary() {
    log_step "Generating cleanup summary..."

    local system_info
    system_info=$(get_system_info)

    if [[ -n "$system_info" ]]; then
        log_info "System status: $system_info"
    fi

    # Show disk usage of important directories
    local important_dirs=("/" "/var" "/tmp" "/var/log" "/var/cache")
    for dir in "${important_dirs[@]}"; do
        if [[ -d "$dir" ]]; then
            local usage
            usage=$(df -h "$dir" 2>/dev/null | tail -1 | awk '{print $5 " used (" $4 " available)"}' || echo "unknown")
            log_info "$dir: $usage"
        fi
    done
}

#==============================================================================
# MAIN EXECUTION
#==============================================================================

main() {
    log_step "Starting System Cleanup and Maintenance"
    echo

    # Show initial system status
    local initial_info
    initial_info=$(get_system_info)
    if [[ -n "$initial_info" ]]; then
        log_info "Initial system status: $initial_info"
        echo
    fi

    # Docker cleanup
    cleanup_docker

    # Package manager cleanup
    cleanup_apk
    cleanup_apt

    # System cleanup
    cleanup_temp_dirs
    cleanup_cache_dirs
    cleanup_logs
    cleanup_journal
    cleanup_thumbnails

    # Memory optimization
    optimize_memory

    # Generate summary
    echo
    generate_summary

    echo
    log_success "System cleanup and maintenance completed!"
}

# Execute main function with all arguments
main "$@"
```
docker-updater.sh — rewritten (106 → 280 lines)

@@ -1,106 +1,280 @@

Removed (previous version):
```sh
#!/bin/bash

NC='\033[0m'
LIGHT_GREEN='\033[1;32m'
LIGHT_BLUE='\033[1;34m'
LIGHT_GREEN='\033[1;32m'
LIGHT_GREY='\033[0;37m'

### AUTO-UPDATER ###
FILE_NAME="docker-updater.sh"
SERVER_FILE="https://git.ivanch.me/ivanch/server-scripts/raw/branch/main/$FILE_NAME"
SERVER_OK=1

# Check if the server file exists
curl -s --head $SERVER_FILE | head -n 1 | grep -E "HTTP/[12] [23].." > /dev/null

if [ $? -ne 0 ]; then
    echo -e "${RED}[x] Error: $SERVER_FILE not found.${NC}" >&2
    SERVER_OK=0
fi

if [ $SERVER_OK -eq 1 ]; then
    echo -e "${LIGHT_BLUE}[i] Running auto-update"

    # Compare the local and server files sha256sum to check if an update is needed
    LOCAL_SHA256=$(sha256sum $FILE_NAME | awk '{print $1}')
    SERVER_SHA256=$(curl -s $SERVER_FILE | sha256sum | awk '{print $1}')

    if [ "$LOCAL_SHA256" != "$SERVER_SHA256" ]; then
        echo -e "${LIGHT_GREY}[i] Updating $FILE_NAME${NC}"

        curl -s -o $FILE_NAME $SERVER_FILE
        chmod +x $FILE_NAME
        echo -e "${LIGHT_GREEN}[i] $FILE_NAME updated.${NC}"

        echo -e "${LIGHT_BLUE}[i] Running updated $FILE_NAME...${NC}"
        ./$FILE_NAME
        exit 0
    else
        echo -e "${LIGHT_GREEN}[i] $FILE_NAME is already up to date.${NC}"
    fi
fi

####################

# Navigate to docker folder
DOCKER_FOLDER=/root/docker

if [ -d "$DOCKER_FOLDER" ]; then
    cd $DOCKER_FOLDER
else
    echo -e "${LIGHT_GREY}[i] Docker folder not found.${NC}"
    exit 1
fi

# Updating Docker containers
for folder in */; do
    cd $DOCKER_FOLDER/$folder

    # Remove trailing slash from folder name if it exists
    folder=${folder%/}
    echo -e "${LIGHT_BLUE}[$folder] Checking for updates..."

    # if .ignore file exists, skip the folder
    if [ -f ".ignore" ]; then
        echo -e "${LIGHT_BLUE}[$folder] Skipping docker container update"
        cd ..
        continue
    fi

    # Check compose files for obsolete version attribute
    for compose_file in "docker-compose.yml" "docker-compose.yaml" "compose.yaml" "compose.yml"; do
        if [ -f "$compose_file" ]; then
            echo -e "${LIGHT_BLUE}[$folder] Checking $compose_file for obsolete version attribute"
            sed -i '/^version:/d' "$compose_file"
        fi
    done

    DOCKER_RUNNING=$(docker compose ps -q)

    if [ -n "$DOCKER_RUNNING" ]; then
        echo -e "${LIGHT_BLUE}[$folder] Stopping Docker containers"
        docker compose down > /dev/null
    else
        echo -e "${LIGHT_BLUE}[$folder] No Docker containers running, will skip update"
        continue
    fi

    echo -e "${LIGHT_BLUE}[$folder] Updating images"
    docker compose pull -q > /dev/null

    echo -e "${LIGHT_BLUE}[$folder] Starting Docker containers"
    docker compose up -d > /dev/null

    echo -e "${LIGHT_GREEN}[$folder] Updated!"

    cd $DOCKER_FOLDER
done

# Run Docker image prune
echo -e "${LIGHT_BLUE}Running Docker image prune..."
docker image prune -af

echo -e "${LIGHT_GREEN} All done!"
```

Added (version 2.0):
```sh
#!/bin/bash

# Docker Container Updater
#
# Description: Automatically updates Docker containers and manages Docker images
# Features:
# - Updates all Docker Compose projects in /root/docker
# - Skips containers with .ignore file
# - Removes obsolete Docker Compose version attributes
# - Cleans up unused Docker images
# Author: ivanch
# Version: 2.0

set -euo pipefail  # Exit on error, undefined vars, and pipe failures
HOSTNAME=$(cat /etc/hostname)
NOTIFY_URL_ERROR="http://notify.haven/template/notify/error"
NOTIFY_URL_UPDATE="http://notify.haven/template/notify/update"
send_error_notification() {
    local message="$1"
    local critical="$2"
    curl -s -X POST "$NOTIFY_URL_ERROR" \
        -H "Content-Type: application/json" \
        -d "{\"caller\": \"$HOSTNAME\", \"message\": \"$message\", \"critical\": $critical}"
}
send_update_notification() {
    local script_time="$1"
    curl -s -X POST "$NOTIFY_URL_UPDATE" \
        -H "Content-Type: application/json" \
        -d "{\"host\": \"$HOSTNAME\", \"asset\": \"Docker containers\", \"time\": $script_time}"
}

#==============================================================================
# CONFIGURATION
#==============================================================================

# Color definitions for output formatting
readonly NC='\033[0m'
readonly RED='\033[1;31m'
readonly GREEN='\033[1;32m'
readonly LIGHT_GREEN='\033[1;32m'
readonly LIGHT_BLUE='\033[1;34m'
readonly LIGHT_GREY='\033[0;37m'
readonly YELLOW='\033[1;33m'

# Script configuration
readonly DOCKER_FOLDER="/root/docker"
readonly COMPOSE_FILES=("docker-compose.yml" "docker-compose.yaml" "compose.yaml" "compose.yml")

# Auto-update configuration
readonly AUTO_UPDATE_ENABLED=true

#==============================================================================
# UTILITY FUNCTIONS
#==============================================================================

# Print formatted log messages
log_info() { echo -e "${LIGHT_GREY}[i] $1${NC}"; }
log_success() { echo -e "${LIGHT_GREEN}[✓] $1${NC}"; }
log_step() { echo -e "${LIGHT_BLUE}[i] $1${NC}"; }
log_container() { echo -e "${LIGHT_BLUE}[$1] $2${NC}"; }

log_warning() {
    echo -e "${YELLOW}[!] $1${NC}";
    send_error_notification "$1" false
}
log_error() {
    echo -e "${RED}[x] $1${NC}" >&2;
    send_error_notification "$1" true
}

# Exit with error message
die() {
    log_error "$1"
    exit 1
}

# Check if a command exists
command_exists() {
    command -v "$1" >/dev/null 2>&1
}

# Check if Docker and Docker Compose are available
check_docker_requirements() {
    log_info "Checking Docker requirements..."

    if ! command_exists docker; then
        die "Docker is not installed or not in PATH"
    fi

    if ! docker compose version >/dev/null 2>&1; then
        die "Docker Compose is not available"
    fi

    log_success "Docker requirements satisfied"
}

# Get SHA256 hash of a file
get_file_hash() {
    local file="$1"
    sha256sum "$file" 2>/dev/null | awk '{print $1}' || echo ""
}

# Get SHA256 hash from URL content
get_url_hash() {
    local url="$1"
    curl -s "$url" 2>/dev/null | sha256sum | awk '{print $1}' || echo ""
}

#==============================================================================
# DOCKER COMPOSE MANAGEMENT
#==============================================================================

# Find the active Docker Compose file in current directory
find_compose_file() {
    for compose_file in "${COMPOSE_FILES[@]}"; do
        if [[ -f "$compose_file" ]]; then
            echo "$compose_file"
            return 0
        fi
    done
    return 1
}

# Remove obsolete version attribute from Docker Compose files
clean_compose_files() {
    local container_name="$1"

    for compose_file in "${COMPOSE_FILES[@]}"; do
        if [[ -f "$compose_file" ]]; then
            log_container "$container_name" "Cleaning obsolete version attribute from $compose_file"
            sed -i '/^version:/d' "$compose_file" || log_warning "Failed to clean $compose_file"
        fi
    done
}

# Check if container should be skipped
should_skip_container() {
    [[ -f ".ignore" ]]
}

# Check if any containers are running in current directory
has_running_containers() {
    local running_containers
    running_containers=$(docker compose ps -q 2>/dev/null || echo "")
    [[ -n "$running_containers" ]]
}

# Update a single Docker Compose project
update_docker_project() {
    local project_dir="$1"
    local container_name
    container_name=$(basename "$project_dir")

    log_container "$container_name" "Checking for updates..."

    # Change to project directory
    cd "$project_dir" || {
        log_error "Cannot access directory: $project_dir"
        return 1
    }

    # Check if container should be skipped
    if should_skip_container; then
        log_container "$container_name" "Skipping (found .ignore file)"
        return 0
    fi

    # Verify compose file exists
    local compose_file
    if ! compose_file=$(find_compose_file); then
        log_container "$container_name" "No Docker Compose file found, skipping"
        return 0
    fi

    # Clean compose files
    clean_compose_files "$container_name"

    # Check if containers are running
    if ! has_running_containers; then
        log_container "$container_name" "No running containers, skipping update"
        return 0
    fi

    # Stop containers
    log_container "$container_name" "Stopping containers..."
    if ! docker compose down >/dev/null 2>&1; then
        log_error "Failed to stop containers in $container_name"
        return 1
    fi

    # Pull updated images
    log_container "$container_name" "Pulling updated images..."
    if ! docker compose pull -q >/dev/null 2>&1; then
        log_warning "Failed to pull images for $container_name, attempting to restart anyway"
    fi

    # Start containers
    log_container "$container_name" "Starting containers..."
    if ! docker compose up -d >/dev/null 2>&1; then
        log_error "Failed to start containers in $container_name"
        return 1
    fi

    log_container "$container_name" "Update completed successfully!"
    return 0
}

# Update all Docker Compose projects
update_all_docker_projects() {
    log_step "Starting Docker container updates..."

    # Check if Docker folder exists
    if [[ ! -d "$DOCKER_FOLDER" ]]; then
        die "Docker folder not found: $DOCKER_FOLDER"
    fi

    # Change to Docker folder
    cd "$DOCKER_FOLDER" || die "Cannot access Docker folder: $DOCKER_FOLDER"

    # Process each subdirectory
    for project_dir in */; do
        if [[ -d "$project_dir" ]]; then
            local project_path="$DOCKER_FOLDER/$project_dir"

            update_docker_project "$project_path"

            # Return to Docker folder for next iteration
            cd "$DOCKER_FOLDER" || die "Cannot return to Docker folder"
        fi
    done
}

#==============================================================================
# DOCKER CLEANUP
#==============================================================================

# Clean up unused Docker resources
cleanup_docker_resources() {
    log_step "Cleaning up unused Docker resources..."

    # Remove unused images
    log_info "Removing unused Docker images..."
    if docker image prune -af >/dev/null 2>&1; then
        log_success "Docker image cleanup completed"
    else
        log_warning "Docker image cleanup failed"
    fi
}

#==============================================================================
# MAIN EXECUTION
#==============================================================================

main() {
    START_TIME=$(date +%s)

    log_step "Starting Docker Container Updater"
    echo

    # Check requirements
    check_docker_requirements

    # Update all Docker projects
    update_all_docker_projects

    # Clean up Docker resources
    cleanup_docker_resources

    echo
    log_success "Docker container update process completed!"

    END_TIME=$(date +%s)
    DURATION=$((END_TIME - START_TIME))
    log_info "Total duration: $DURATION seconds"

    send_update_notification $DURATION
}

# Execute main function with all arguments
main "$@"
```
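Per the `should_skip_container` check above, a project is excluded from updates simply by placing an `.ignore` file in its folder (the project name here is hypothetical):

```sh
# Exclude /root/docker/immich from the next docker-updater.sh run.
touch /root/docker/immich/.ignore
```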
haven-notify/Dockerfile — new file (+20)

@@ -0,0 +1,20 @@
```dockerfile
# Start from the official Golang image for building
FROM --platform=$BUILDPLATFORM golang:1.22-alpine AS builder

ARG TARGETARCH
ARG TARGETOS

WORKDIR /app
COPY . .
# Build statically for Linux
RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o haven-notify main.go

# Use Alpine for running, with CA certificates for TLS
FROM alpine:latest
WORKDIR /app
RUN apk --no-cache add ca-certificates
COPY template/ template/
COPY --from=builder /app/haven-notify .
EXPOSE 8080
ENV WEBHOOK_URL=""
ENTRYPOINT ["/app/haven-notify"]
```
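For reference, a manual multi-arch build roughly equivalent to what the CI workflow does with docker/build-push-action (tag and platforms taken from the workflow; assumes a Buildx builder with QEMU is configured):

```sh
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  -t git.ivanch.me/ivanch/haven-notify:latest \
  --push haven-notify
```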
haven-notify/README.md — new file (+95)

<div align="center">
  <img src="./assets/widelogo.png" alt="Haven Notify Logo">
</div>

## Overview
Haven Notify is an internal service designed to send notifications to a specified Discord channel.

It's built in Go and can be deployed as a container or managed service.

## Prerequisites
- Go 1.18 or newer
- Docker
- A Discord Webhook URL

## API Specification

### Send Notification
- **Endpoint**: `/notify`
- **Method**: `POST`
- **Request Body**:
```json
{
  "title": "Notification Title",
  "message": "Notification Message"
}
```

### Send Backup Notification
- **Endpoint**: `/template/notify/backup`
- **Method**: `POST`
- **Request Body**:
```json
{
  "title": "Notification Title",
  "asset": "Notification Asset Name",
  "backupSizeInMB": 500,
  "extra": [
    {
      "name": "Additional Info",
      "value": "Some extra information"
    }
  ]
}
```

### Send Update Notification
- **Endpoint**: `/template/notify/update`
- **Method**: `POST`
- **Request Body**:
```json
{
  "host": "Host that performed the update",
  "asset": "Asset that was updated",
  "time": 500 // in seconds
}
```

### Send Error Notification
- **Endpoint**: `/template/notify/error`
- **Method**: `POST`
- **Request Body**:
```json
{
  "caller": "Who triggered the error",
  "message": "Error while moving file",
  "critical": true,
  "extra": [
    {
      "name": "Additional Info",
      "value": "Some extra information"
    }
  ]
}
```

## Setup & Usage

### Docker
1. Build the Docker image:
   ```sh
   docker build -t haven-notify .
   ```
2. Run the container:
   ```sh
   docker run -e WEBHOOK_URL=your_webhook_url haven-notify
   ```

### Kubernetes
Deployment manifest is available at `deploy/haven-notify.yaml`.
1. Edit the manifest to set your environment variables.
2. Create a generic secret named `discord-webhook` with `HAVEN_WEBHOOK_URL=your_webhook_url` (the name and key the manifest's `secretKeyRef` expects).
3. Apply deployment:
   ```sh
   kubectl apply -f deploy/haven-notify.yaml
   ```
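A minimal smoke test of the `/notify` endpoint, assuming the service is reachable at `notify.haven` as configured in the Ingress:

```sh
curl -s -X POST "http://notify.haven/notify" \
  -H "Content-Type: application/json" \
  -d '{"title": "Test", "message": "Hello from Haven Notify"}'
# On success the service answers 200 "Notification sent" and forwards the message to the Discord webhook.
```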
haven-notify/assets/widelogo.png — new binary file (334 KiB, not shown)
haven-notify/deploy/haven-notify.yaml — new file (+73)

@@ -0,0 +1,73 @@
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: haven-notify
  labels:
    app: haven-notify
spec:
  replicas: 1
  selector:
    matchLabels:
      app: haven-notify
  template:
    metadata:
      labels:
        app: haven-notify
    spec:
      containers:
        - name: haven-notify
          image: git.ivanch.me/ivanch/haven-notify:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 8080
          env:
            - name: WEBHOOK_URL
              valueFrom:
                secretKeyRef:
                  name: discord-webhook
                  key: HAVEN_WEBHOOK_URL
          readinessProbe:
            httpGet:
              path: /ready
              port: 8080
            initialDelaySeconds: 5
            periodSeconds: 10
          livenessProbe:
            httpGet:
              path: /live
              port: 8080
            initialDelaySeconds: 5
            periodSeconds: 10
---
apiVersion: v1
kind: Service
metadata:
  name: haven-notify
spec:
  selector:
    app: haven-notify
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: haven-notify
  namespace: default
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: web
spec:
  ingressClassName: nginx
  rules:
    - host: notify.haven
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: haven-notify
                port:
                  number: 8080
```
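The Deployment reads `WEBHOOK_URL` from a secret named `discord-webhook` under the key `HAVEN_WEBHOOK_URL`, so the secret can be created as follows (the webhook value is a placeholder):

```sh
kubectl create secret generic discord-webhook \
  --from-literal=HAVEN_WEBHOOK_URL='https://discord.com/api/webhooks/<id>/<token>'
kubectl apply -f deploy/haven-notify.yaml
```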
haven-notify/main.go — new file (+214)

@@ -0,0 +1,214 @@
```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"html/template"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"strings"
	"time"
)

// Notification payload
type Notification struct {
	Title   string `json:"title"`
	Message string `json:"message"`
}

func main() {
	http.HandleFunc("/notify", notifyHandler)
	http.HandleFunc("/ready", readinessHandler)
	http.HandleFunc("/live", livenessHandler)
	http.HandleFunc("/template/notify/", templateNotifyHandler)
	log.Println("Starting server on :8080...")
	log.Fatal(http.ListenAndServe(":8080", nil))
}

func notifyHandler(w http.ResponseWriter, r *http.Request) {
	log.Printf("Incoming %s request from %s to %s", r.Method, r.RemoteAddr, r.URL.Path)
	if r.Method != http.MethodPost {
		log.Printf("Method not allowed: %s", r.Method)
		w.WriteHeader(http.StatusMethodNotAllowed)
		w.Write([]byte("Method not allowed"))
		return
	}

	var notif Notification
	if err := json.NewDecoder(r.Body).Decode(&notif); err != nil {
		log.Printf("Invalid payload: %v", err)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("Invalid payload"))
		return
	}
	log.Printf("Received notification payload: Title='%s', Message='%s'", notif.Title, notif.Message)

	// Call Discord notification function
	if err := sendDiscordNotification(notif.Title, notif.Message); err != nil {
		log.Printf("Failed to send Discord notification: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("Failed to send Discord notification"))
		return
	}

	log.Printf("Notification sent successfully for Title='%s'", notif.Title)
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("Notification sent"))
}

// Readiness handler
func readinessHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("Ready"))
}

// Liveness handler
func livenessHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("Alive"))
}

func sendDiscordNotification(title, message string) error {
	webhookURL := os.Getenv("WEBHOOK_URL")
	if webhookURL == "" {
		log.Printf("WEBHOOK_URL environment variable not set")
		return fmt.Errorf("WEBHOOK_URL environment variable not set")
	}

	// Discord webhook payload
	type discordPayload struct {
		Content string `json:"content"`
	}

	content := "**" + title + "**\n" + message
	payload := discordPayload{Content: content}

	jsonData, err := json.Marshal(payload)
	if err != nil {
		log.Printf("Failed to marshal Discord payload: %v", err)
		return err
	}

	log.Printf("Sending Discord notification: Title='%s', Message='%s'", title, message)
	resp, err := http.Post(webhookURL, "application/json", bytes.NewBuffer(jsonData))
	if err != nil {
		log.Printf("Error posting to Discord webhook: %v", err)
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		log.Printf("Discord webhook returned status: %s", resp.Status)
		return fmt.Errorf("Discord webhook returned status: %s", resp.Status)
	}

	log.Printf("Discord notification sent successfully: Title='%s'", title)
	return nil
}

func templateNotifyHandler(w http.ResponseWriter, r *http.Request) {
	log.Printf("Incoming %s request from %s to %s", r.Method, r.RemoteAddr, r.URL.Path)
	if r.Method != http.MethodPost {
		log.Printf("Method not allowed: %s", r.Method)
		w.WriteHeader(http.StatusMethodNotAllowed)
		w.Write([]byte("Method not allowed"))
		return
	}

	templateName := r.URL.Path[len("/template/notify/"):] // Extract template name
	if templateName == "" {
		log.Printf("Template name not provided")
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("Template name not provided"))
		return
	}

	templatePath := "template/" + templateName + ".tmpl"
	templateData, err := ioutil.ReadFile(templatePath)
	if err != nil {
		log.Printf("Failed to read template: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("Failed to read template"))
		return
	}

	tmpl, err := template.New(templateName).Funcs(template.FuncMap{
		"formatSize": func(size float64) string {
			if size > 1024 {
				return fmt.Sprintf("%.2f GiB", size/1024)
			}
			return fmt.Sprintf("%.2f MiB", size)
		},
		"upper": strings.ToUpper,
		"lower": strings.ToLower,
		"title": strings.Title,
		"now": func() string {
			return fmt.Sprintf("%d", time.Now().Unix())
		},
		"formatTime": func(timestamp string) string {
			if timestamp == "" {
				return time.Now().Format("2006-01-02T15:04:05Z")
			}
			return timestamp
		},
	}).Parse(string(templateData))
	if err != nil {
		log.Printf("Failed to parse template: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("Failed to parse template"))
		return
	}

	var rawPayload map[string]interface{}
	if err := json.NewDecoder(r.Body).Decode(&rawPayload); err != nil {
		log.Printf("Invalid payload: %v", err)
		w.WriteHeader(http.StatusBadRequest)
		w.Write([]byte("Invalid payload"))
		return
	}

	// Normalize keys to lowercase for case-insensitive parsing
	payload := make(map[string]interface{})
	for key, value := range rawPayload {
		payload[strings.ToLower(key)] = value
	}

	var filledTemplate bytes.Buffer
	if err := tmpl.Execute(&filledTemplate, payload); err != nil {
		log.Printf("Failed to execute template: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("Failed to execute template"))
		return
	}

	webhookURL := os.Getenv("WEBHOOK_URL")
	if webhookURL == "" {
		log.Printf("WEBHOOK_URL environment variable not set")
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("WEBHOOK_URL environment variable not set"))
		return
	}

	resp, err := http.Post(webhookURL, "application/json", &filledTemplate)
	if err != nil {
		log.Printf("Error posting to Discord webhook: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("Failed to send notification"))
		return
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		log.Printf("Discord webhook returned status: %s", resp.Status)
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("Failed to send notification"))
		return
	}

	log.Printf("Notification sent successfully using template '%s'", templateName)
	w.WriteHeader(http.StatusOK)
	w.Write([]byte("Notification sent"))
}
```
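Because `templateNotifyHandler` lowercases every payload key before executing the template, mixed-case requests resolve to the same template variables. An illustrative call against the error template:

```sh
# "Caller"/"Message"/"Critical" are normalized to "caller"/"message"/"critical" before rendering.
curl -s -X POST "http://notify.haven/template/notify/error" \
  -H "Content-Type: application/json" \
  -d '{"Caller": "cron", "Message": "Disk almost full", "Critical": true}'
```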
39
haven-notify/template/backup.tmpl
Normal file
@@ -0,0 +1,39 @@
{{/*
Docker Backup Notification Template
Variables expected:
- .title: The backup title/name
- .asset: The asset being backed up
- .backupsizeinmb: The backup size in MB (will be formatted automatically)
- .extra: Optional array of additional fields with .name and .value

Template Functions Available:
- formatSize: Formats size in MB/GB automatically
*/}}
{
  "embeds": [
    {
      "title": "📦 Backup - {{.title}}",
      "description": "**{{.asset}}** has been backed up successfully! ✅🫡\n",
      "color": 3066993,
      "fields": [
        {
          "name": "💾 Backup Size",
          "value": "{{if .backupsizeinmb}}{{formatSize .backupsizeinmb}}{{else}}Unknown{{end}}",
          "inline": true
        }
        {{- if .extra}}
        {{- range $index, $field := .extra}},
        {
          "name": "{{$field.name}}",
          "value": "{{$field.value}}",
          "inline": true
        }
        {{- end}}
        {{- end}}
      ],
      "footer": {
        "text": "✨ Haven Notify ✨"
      }
    }
  ]
}
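A payload matching the variables documented in this template could look like the sketch below (values are illustrative; `extra` is optional, and each entry becomes one more inline embed field):

curl -s -X POST "http://notify.haven/notify" \
  -H "Content-Type: application/json" \
  -d '{
    "title": "Weekly NAS backup",
    "asset": "photos",
    "backupsizeinmb": 2048,
    "extra": [
      { "name": "Host", "value": "omv" },
      { "name": "Duration", "value": "42s" }
    ]
  }'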
36
haven-notify/template/error.tmpl
Normal file
@@ -0,0 +1,36 @@
{{/*
Error Notification Template
Variables expected:
- .caller: The caller of the error
- .message: The error message
- .critical: Boolean indicating if the error is critical
- .extra: Optional array of additional fields with .name and .value
*/}}
{
  "embeds": [
    {
      "title": "❌ Error",
      "description": "**{{.caller}}** encountered an error!",
      "color": {{if .critical}}15158332{{else}}15844367{{end}},
      "fields": [
        {
          "name": "📄 Message",
          "value": "{{.message}}",
          "inline": false
        }
        {{- if .extra}}
        {{- range $index, $field := .extra}},
        {
          "name": "{{$field.name}}",
          "value": "{{$field.value}}",
          "inline": true
        }
        {{- end}}
        {{- end}}
      ],
      "footer": {
        "text": "✨ Haven Notify ✨"
      }
    }
  ]
}
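The only branching here is the color: `.critical` switches the embed between red (15158332, 0xE74C3C) and gold (15844367, 0xF1C40F). An illustrative payload, again assuming the /notify endpoint used elsewhere in this compare:

curl -s -X POST "http://notify.haven/notify" \
  -H "Content-Type: application/json" \
  -d '{"caller": "nas-gdrive-backup.sh", "message": "rclone upload failed", "critical": true}'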
29
haven-notify/template/update.tmpl
Normal file
@@ -0,0 +1,29 @@
{{/*
Update Notification Template
Variables expected:
- .host: The host where the update occurred
- .asset: The asset being updated (Docker or k8s)
- .time: The time in seconds that the script took to run

Template Functions Available:
- formatTime: Formats time in seconds to a human-readable format
*/}}
{
  "embeds": [
    {
      "title": "🔄 Update - {{.asset}}",
      "description": "**{{.host}}** has successfully updated **{{.asset}}**! ✅",
      "color": 3447003,
      "fields": [
        {
          "name": "⏱️ Time Taken",
          "value": "{{if .time}}{{.time}}{{else}}Unknown{{end}} seconds",
          "inline": true
        }
      ],
      "footer": {
        "text": "✨ Haven Notify ✨"
      }
    }
  ]
}
nas-gdrive-backup.sh
@@ -4,6 +4,9 @@
 # For each folder on BACKUP_SOURCE, it gets the sha256 checksum of it, checks the checksum against the previous backup, and if it has changed, it creates a 7zip archive of the folder with encryption.
 # It then uploads the archive to Google Drive using rclone.
+
+# Install: curl -sSL https://git.ivanch.me/ivanch/server-scripts/raw/branch/main/nas-gdrive-backup.sh | bash -s -- --install
+# Run manually: /usr/local/bin/nas-gdrive-backup.sh
 
 # Configuration
 BACKUP_SOURCE="/export/Backup"
 META_DIR="/export/Backup/.gdrive"
@@ -14,13 +17,6 @@ GDRIVE_PATH="/NAS-Backups"
 ARCHIVE_NAME="backup.7z"
 LOG_FILE="/var/log/nas-gdrive-backup.log"
 
-# Check for install flag
-# Usage: curl -sSL https://git.ivanch.me/ivanch/server-scripts/raw/branch/main/nas-gdrive-backup.sh | bash -s -- --install
-if [[ "$1" == "--install" ]]; then
-    install_script
-    exit 0
-fi
-
 # Function for logging
 log() {
     echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "$LOG_FILE"
@@ -33,32 +29,11 @@ clean_up() {
 
 trap clean_up EXIT
 
-# Check if 7z is installed
-if ! which 7z; then
-    log "ERROR: 7z is not installed"
-    exit 1
-fi
-
-# Check if rclone is installed
-if ! which rclone; then
-    log "ERROR: rclone is not installed"
-    exit 1
-fi
-
-# Create meta directory if it doesn't exist
-if [ ! -d "$META_DIR" ]; then
-    log "Creating meta directory: $META_DIR"
-    mkdir -p "$META_DIR"
-fi
-
-# Fix permissions for the meta directory (777 recursively)
-chmod -R 777 "$META_DIR"
-
 create_7z() {
     local folder="$1"
     local archive_name="$2"
     log "Creating 7zip archive of $folder"
-    7z a -p"$ZIP_PASSWORD" -mhe=on -mx=5 "$archive_name" "$folder"
+    7z a -p"$ZIP_PASSWORD" -mhe=on -mx=3 "$archive_name" "$folder"
     if [ $? -ne 0 ]; then
         log "ERROR: Failed to create 7zip archive of $folder"
     fi
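The only behavioral change in create_7z is the compression level: dropping from -mx=5 to -mx=3 trades a somewhat larger archive for a faster run. For reference, what each flag in that invocation does (sample paths are illustrative):

#   a                  add files to an archive
#   -p"$ZIP_PASSWORD"  encrypt the archive contents with the given password
#   -mhe=on            also encrypt the archive header, hiding file names
#   -mx=3              compression level 0-9 (0 = store, 9 = ultra); 3 favors speed
7z a -p"$ZIP_PASSWORD" -mhe=on -mx=3 /tmp/photos.7z /export/Backup/photos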
@@ -81,6 +56,28 @@ upload_to_gdrive() {
     fi
 }
 
+main() {
+    # Check if 7z is installed
+    if ! which 7z > /dev/null; then
+        log "ERROR: 7z is not installed"
+        exit 1
+    fi
+
+    # Check if rclone is installed
+    if ! which rclone > /dev/null; then
+        log "ERROR: rclone is not installed"
+        exit 1
+    fi
+
+    # Create meta directory if it doesn't exist
+    if [ ! -d "$META_DIR" ]; then
+        log "Creating meta directory: $META_DIR"
+        mkdir -p "$META_DIR"
+    fi
+
+    # Fix permissions for the meta directory (777 recursively)
+    chmod -R 777 "$META_DIR"
+
 # Loop through each folder in the backup source
 for folder in "$BACKUP_SOURCE"/*; do
     if [ -d "$folder" ]; then
@@ -127,11 +124,18 @@ for folder in "$BACKUP_SOURCE"/*; do
         log ""
 done
 
+# Fix permissions for the meta directory (777 recursively)
+chmod -R 777 "$META_DIR"
+
 log "Backup process completed successfully"
 
 # Exit with success
 exit 0
+}
+
+###########################
+### Installation script ###
+###########################
 # Function to install a dependency if not already installed
 install_dependency() {
     local package="$1"
@@ -170,9 +174,12 @@ install_script() {
 
 install_log_separator
 # Check if running as root
+install_log_info "Checking if the script is running as root"
 if [ "$(id -u)" -ne 0 ]; then
     install_log_error "ERROR: This script must be run as root"
     exit 1
+else
+    install_log_ok "Running as root"
 fi
 install_log_separator
 
@@ -201,7 +208,7 @@ install_script() {
     install_log_error "ERROR: ZIP_PASSWORD is not set"
     exit 1
 fi
-read -p "Enter ZIP_PASSWORD: " ZIP_PASSWORD
+read -p "Enter ZIP_PASSWORD: " ZIP_PASSWORD </dev/tty
 if [ -z "$ZIP_PASSWORD" ]; then
     install_log_error "ERROR: ZIP_PASSWORD cannot be empty"
     exit 1
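The `</dev/tty` added to `read` matters because this installer is meant to be piped into bash: stdin is then the script text itself, not the keyboard, so a bare `read` would consume script bytes or hit EOF. Redirecting the read from /dev/tty always prompts the controlling terminal. A minimal demonstration:

# Without </dev/tty, read consumes the piped stdin ("demo"); with it, the
# prompt goes to the interactive terminal even though stdin is a pipe.
echo "demo" | bash -c 'read -p "Enter value: " v </dev/tty; echo "got: $v"'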
@@ -229,6 +236,24 @@ install_script() {
 fi
 install_log_info "Setting permissions for $META_DIR and $TMP_DIR to 777"
 chmod -R 777 "$META_DIR" "$TMP_DIR"
+install_log_ok "Directories checked and are ok"
+
+# Check for existing .sha256 files, if there are any, prompt to remove them
+install_log_info "Verifying existing .sha256 files in $META_DIR"
+for file in "$META_DIR"/*; do
+    if [ -f "$file" ] && [[ "$file" == *.sha256 ]]; then
+        install_log_info "Found .sha256 file: \e[96m\e[4m$file\e[0m"
+        read -p "Do you want to remove this file? [y/N]: " choice </dev/tty
+        if [[ "$choice" == "y" || "$choice" == "Y" ]]; then
+            install_log_info "Removing $file"
+            rm "$file"
+        else
+            install_log_info "Skipping $file"
+        fi
+    fi
+done
+install_log_ok "Existing .sha256 files checked"
+
 install_log_separator
 
 install_log_info "Setting up rclone configuration"
@@ -241,8 +266,8 @@ install_script() {
 install_log_separator
 
 install_log_info "Setting up cron job for backup script"
-(crontab -l 2>/dev/null; echo "30 23 * * 1,5 $0 > /tmp/nas-gdrive-backup.log") | crontab -
-install_log_ok "Cron job set up to run $0 every Monday and Friday at 23:30"
+(crontab -l 2>/dev/null; echo "55 23 * * 1 /usr/local/bin/nas-gdrive-backup.sh > /tmp/nas-gdrive-backup.log") | crontab -
+install_log_ok "Cron job set up to run /usr/local/bin/nas-gdrive-backup.sh every Monday at 23:55"
 install_log_separator
 
 echo -e ""
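Replacing `$0` with the absolute path fixes a real failure mode: when the script is run via `curl ... | bash -s -- --install`, `$0` expands to `bash` rather than the script path, so the old cron entry scheduled the wrong command. After installation the entry can be checked with:

crontab -l | grep nas-gdrive-backup.sh
# expected: 55 23 * * 1 /usr/local/bin/nas-gdrive-backup.sh > /tmp/nas-gdrive-backup.log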
@@ -250,9 +275,18 @@ install_script() {
 install_log_ok "Installation completed successfully!"
 install_log_separator
 echo -e ""
-echo -e "You can now run the script manually with: \e[32m/usr/local/bin/nas-gdrive-backup.sh\e[0m"
+echo -e "You can now run the script manually with: \e[32mnas-gdrive-backup.sh\e[0m"
 echo -e "Or it will run automatically according to the cron schedule."
 
 # Exit with success
 exit 0
 }
+
+# Check for install flag
+if [[ "$1" == "--install" ]]; then
+    install_script
+    exit 0
+fi
+
+main "$@"
+exit 0
scripts-download.sh
@@ -1,193 +1,330 @@
 #!/bin/bash
 
-# Usage:
-## curl -sSL https://git.ivanch.me/ivanch/server-scripts/raw/branch/main/scripts-download.sh | bash
-
-# colors
-RED='\033[1;31m'
-GREEN='\033[1;32m'
-NC='\033[0m'
-LIGHT_BLUE='\033[1;34m'
-LIGHT_RED='\033[1;31m'
-LIGHT_GREEN='\033[1;32m'
-GREY='\033[1;30m'
-YELLOW='\033[1;33m'
-
-FILES_URL="https://git.ivanch.me/ivanch/server-scripts/raw/branch/main"
-
-echo -e "\r${LIGHT_BLUE}[i] Running scripts-download.sh"
+# Usage: curl -sSL https://git.ivanch.me/ivanch/server-scripts/raw/branch/main/scripts-download.sh | bash
+
+set -euo pipefail
+
+#==============================================================================
+# CONFIGURATION
+#==============================================================================
+
+# Color definitions for output formatting
+readonly RED='\033[1;31m'
+readonly GREEN='\033[1;32m'
+readonly NC='\033[0m'
+readonly LIGHT_BLUE='\033[1;34m'
+readonly LIGHT_RED='\033[1;31m'
+readonly LIGHT_GREEN='\033[1;32m'
+readonly GREY='\033[1;30m'
+readonly YELLOW='\033[1;33m'
+
+# Configuration
+readonly FILES_URL="https://git.ivanch.me/ivanch/server-scripts/raw/branch/main"
+readonly REQUIRED_PACKAGES=("zip" "unzip" "curl")
+readonly REQUIRED_COMMANDS=("zip" "unzip" "sha256sum" "curl" "crontab")
+readonly AVAILABLE_SCRIPTS=("clean.sh" "backup.sh" "docker-updater.sh")
+
+# Format: [script_name]="cron_schedule"
+declare -A CRONTAB_SCHEDULES=(
+    ["clean.sh"]="0 3 * * *"            # Daily at 3 AM
+    ["backup.sh"]="0 23 * * 1,5"        # Monday and Friday at 11 PM
+    ["docker-updater.sh"]="0 3 * * 6"   # Every Saturday at 3 AM
+)
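The schedule lookup later in the script uses `${CRONTAB_SCHEDULES[$script]:-"0 0 * * *"}`, so any script missing from this map silently falls back to midnight daily. A minimal sketch of that expansion:

declare -A CRONTAB_SCHEDULES=( ["clean.sh"]="0 3 * * *" )
echo "${CRONTAB_SCHEDULES[clean.sh]:-0 0 * * *}"   # -> 0 3 * * *   (mapped)
echo "${CRONTAB_SCHEDULES[other.sh]:-0 0 * * *}"   # -> 0 0 * * *   (default)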
-
-# Detect OS (Debian or Alpine)
-echo -e "${GREY}[i] Detecting OS..."
-
-DETECTED=""
-
-if [ -x "$(command -v apk)" ]; then
-    DETECTED="Alpine"
-fi
-
-if [ -x "$(command -v apt)" ]; then
-    DETECTED="Debian"
-fi
-
-if [ -z "$DETECTED" ]; then
-    echo -e "${RED}[x] Error: OS not supported.${NC}" >&2
-    exit 1
-fi
-
-echo -e "${GREEN}[✓] Detected '$DETECTED' Linux.${NC}"
-
-echo -e "${GREY}[i] Checking if required packages are installed..."
-
-PACKAGES=("zip" "unzip" "sha256sum" "curl" "crontab")
-NOT_INSLALLED=()
-detect_packages() {
-    for PACKAGE in "${PACKAGES[@]}"; do
-        if ! [ -x "$(command -v $PACKAGE)" ]; then
-            echo -e "${YELLOW}[!] Error: $PACKAGE is not installed, will attempt to install later.${NC}" >&2
-            NOT_INSLALLED+=($PACKAGE)
-        fi
-    done
-}
-
-detect_packages
-
-if [ ${#NOT_INSLALLED[@]} -ne 0 ]; then
-    if [ "$DETECTED" == "Alpine" ]; then
-        echo -e "${GREY}[i] Installing required packages using APK...${NC}"
-        echo -e "${GREY}[i] Updating APK...${NC}"
-        apk update >/dev/null
-        echo -e "${GREY}[i] Installing packages...${NC}"
-        apk add --no-cache ${NOT_INSLALLED[@]} >/dev/null
-
-        if [ $? -ne 0 ]; then
-            echo -e "${RED}[x] Error: Failed to install required packages.${NC}" >&2
-            exit 1
-        else
-            echo -e "${GREEN}[✓] All required packages should be installed.${NC}"
-        fi
-    elif [ "$DETECTED" == "Debian" ]; then
-        echo -e "${GREY}[i] Installing required packages using APT...${NC}"
-        echo -e "${GREY}[i] Updating APT...${NC}"
-        apt-get update -y >/dev/null
-        echo -e "${GREY}[i] Installing packages...${NC}"
-        apt-get install -y ${NOT_INSLALLED[@]} >/dev/null
-
-        if [ $? -ne 0 ]; then
-            echo -e "${RED}[x] Error: Failed to install required packages.${NC}" >&2
-            exit 1
-        else
-            echo -e "${GREEN}[✓] All required packages should be installed.${NC}"
-        fi
-    fi
-
-    NOT_INSLALLED=()
-    detect_packages
-
-    if [ ${#NOT_INSLALLED[@]} -ne 0 ]; then
-        echo -e "${RED}[x] Error: Failed to run some of the required packages.${NC}" >&2
-        echo -e "${RED}[x] [${NOT_INSLALLED[@]}] are not installed.${NC}" >&2
-        exit 1
-    fi
-
-    echo -e "${GREEN}[✓] All required packages are installed.${NC}"
-fi
-
-echo -e "${GREY}[i] Checking if crontab is running..."
-
-# Check if crontab is running on the system using pgrep (crond or cron)
-if ! pgrep "cron" > /dev/null; then
-    echo -e "${RED}[x] Error: Crontab is not running.${NC}" >&2
-    exit 1
-fi
-
-echo -e "${GREEN}[✓] Crontab is running.${NC}"
-
-# Variables
-FILES=("clean.sh" "backup.sh" "docker-updater.sh")
-
-# Prompt user to select files to download
-selected_files=()
-echo -e "${GREY}[i] Select files to download and install on crontab:${NC} "
-for FILE in "${FILES[@]}"; do
-    read -p "Do you want to download and install $FILE? [Y/n]: " choice </dev/tty
-    if [[ "$choice" == "y" || "$choice" == "Y" || -z "$choice" ]]; then
-        selected_files+=("$FILE")
-    fi
-done
-
-if [ ${#selected_files[@]} -eq 0 ]; then
-    echo -e "${RED}[x] No files selected. Exiting...${NC}"
-    exit 1
-fi
+#==============================================================================
+# UTILITY FUNCTIONS
+#==============================================================================
+
+# Print formatted log messages
+log_info() { echo -e "${GREY}[i] $1${NC}"; }
+log_success() { echo -e "${GREEN}[✓] $1${NC}"; }
+log_warning() { echo -e "${YELLOW}[!] $1${NC}"; }
+log_error() { echo -e "${RED}[x] $1${NC}" >&2; }
+log_step() { echo -e "${LIGHT_BLUE}[i] $1${NC}"; }
+
+# Exit with error message
+die() {
+    log_error "$1"
+    exit 1
+}
+
+# Check if a command exists
+command_exists() {
+    command -v "$1" >/dev/null 2>&1
+}
+
+# Check if a process is running
+process_running() {
+    pgrep "$1" >/dev/null 2>&1
+}
+
+#==============================================================================
+# MAIN FUNCTIONS
+#==============================================================================
+
+# Detect the operating system
+detect_operating_system() {
+    if command_exists apk; then
+        echo "Alpine"
+    elif command_exists apt; then
+        echo "Debian"
+    else
+        die "Unsupported operating system. This script supports Alpine and Debian-based systems only."
+    fi
+}
+
+# Check for missing packages
+get_missing_packages() {
+    local missing=()
+
+    # Check each required command and map to package names
+    if ! command_exists "zip"; then
+        missing+=("zip")
+    fi
+    if ! command_exists "unzip"; then
+        missing+=("unzip")
+    fi
+    if ! command_exists "curl"; then
+        missing+=("curl")
+    fi
+    # sha256sum is part of coreutils (usually pre-installed)
+    # crontab is part of cron package, but we'll check for cron service later
+
+    # Only print if there are missing packages
+    if [[ ${#missing[@]} -gt 0 ]]; then
+        printf '%s\n' "${missing[@]}"
+    fi
+}
+
+# Install packages based on the detected OS
+install_packages() {
+    local os="$1"
+    shift
+    local packages=("$@")
+
+    if [[ ${#packages[@]} -eq 0 ]]; then
+        log_info "No packages to install"
+        return 0
+    fi
+
+    log_info "Installing required packages: ${packages[*]}"
+    log_info "Debug: Installing ${#packages[@]} packages on $os"
+
+    case "$os" in
+        "Alpine")
+            log_info "Updating APK package index..."
+            apk update >/dev/null || die "Failed to update APK package index"
+            log_info "Installing packages via APK..."
+            apk add --no-cache "${packages[@]}" >/dev/null || die "Failed to install packages via APK"
+            ;;
+        "Debian")
+            log_info "Ensuring /var/cache/apt/archives/partial exists..."
+            mkdir -p /var/cache/apt/archives/partial || die "Failed to create /var/cache/apt/archives/partial"
+            log_info "Updating APT package index..."
+            apt-get update -y >/dev/null || die "Failed to update APT package index"
+
+            log_info "Installing packages via APT..."
+            apt-get install -y "${packages[@]}" >/dev/null || die "Failed to install packages via APT"
+            ;;
+        *)
+            log_error "Debug info - OS variable content: '$os'"
+            log_error "Debug info - OS variable length: ${#os}"
+            die "Unknown operating system: '$os'"
+            ;;
+    esac
+}
+
+# Verify all required packages are available
+verify_packages() {
+    log_info "Verifying package installation..."
+
+    local missing_packages
+    readarray -t missing_packages < <(get_missing_packages)
+
+    if [[ ${#missing_packages[@]} -gt 0 ]]; then
+        log_error "Failed to install required packages: ${missing_packages[*]}"
+        die "Please install the missing packages manually and try again"
+    fi
+
+    log_success "All required packages are available"
+}
+
+# Check if crontab service is running
+check_crontab_service() {
+    log_info "Checking crontab service status..."
+
+    if ! process_running "cron"; then
+        die "Crontab service is not running. Please start the cron service first."
+    fi
+
+    log_success "Crontab service is running"
+}
+
+# Prompt user to select scripts for installation
+select_scripts() {
+    local selected=()
+
+    echo >&2  # Send to stderr so it doesn't get captured
+    echo -e "${GREY}[i] Available scripts for download and installation:${NC}" >&2
+    echo >&2
+
+    for script in "${AVAILABLE_SCRIPTS[@]}"; do
+        local schedule="${CRONTAB_SCHEDULES[$script]:-"0 0 * * *"}"
+        echo -e "  ${LIGHT_BLUE}$script${NC} - Schedule: ${GREY}$schedule${NC}" >&2
+    done
+
+    echo >&2
+    echo -e "${GREY}[i] Select scripts to download and install:${NC}" >&2
+
+    for script in "${AVAILABLE_SCRIPTS[@]}"; do
+        read -p "Install $script? [Y/n]: " choice </dev/tty
+        if [[ "$choice" =~ ^[Yy]?$ ]]; then
+            selected+=("$script")
+        fi
+    done
+
+    if [[ ${#selected[@]} -eq 0 ]]; then
+        echo -e "${RED}[x] No scripts selected. Exiting...${NC}" >&2
+        exit 1
+    fi
+
+    # Only output the selected scripts to stdout
+    printf '%s\n' "${selected[@]}"
+}
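Note the stdout/stderr split in select_scripts: every prompt and banner goes to stderr (or is read from /dev/tty) because the caller captures the function's stdout with readarray, and only the selected script names may land there. The pattern in isolation:

pick() { echo "choose items:" >&2; printf '%s\n' "a.sh" "b.sh"; }
readarray -t chosen < <(pick)   # prompt appears on stderr; stdout fills the array
echo "${chosen[@]}"             # -> a.sh b.sh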
-# Check connection with the server for selected files
-echo -e "${GREY}[i] Checking connection with the server..."
-for FILE in "${selected_files[@]}"; do
-    curl -s --head "$FILES_URL/$FILE" | head -n 1 | grep -E "HTTP/[12] [23].." > /dev/null
-
-    if [ $? -ne 0 ]; then
-        echo -e "${RED}[x] Error: $FILE not found on the server.${NC}" >&2
-        exit 1
-    fi
-done
-
-echo -e "${GREEN}[✓] Connection with the server established.${NC}"
-
-echo -e "${GREY}[i] Downloading scripts..."
-
-# Download selected scripts
-for FILE in "${selected_files[@]}"; do
-    curl -s -o "./$FILE" "$FILES_URL/$FILE"
-done
-
-echo -e "${GREEN}[✓] Scripts downloaded.${NC}"
-
-CURRENT_WORKDIR=$(pwd)
-
-# Setup permissions
-echo -e "${GREY}[i] Setting up permissions..."
-
-# Setup permissions for selected files
-for FILE in "${selected_files[@]}"; do
-    chmod +x "./$FILE"
-done
-
-echo -e "${GREEN}[✓] Permissions set up.${NC}"
-
-# Setup crontab for selected files
-echo -e "${GREY}[i] Setting up crontab..."
-
-# Add crontabs
-for FILE in "${selected_files[@]}"; do
-    if crontab -l 2>/dev/null | grep -q $FILE; then
-        echo -e "${LIGHT_BLUE}[i] [$FILE] Crontab already exists. Removing...${NC}"
-        crontab -l | grep -v $FILE | crontab -
-    fi
-    echo -e "${LIGHT_BLUE}[i] [$FILE] Adding crontab...${NC}"
-
-    if [ "$FILE" == "clean.sh" ]; then
-        (crontab -l 2>/dev/null; echo "0 23 * * * ${CURRENT_WORKDIR}/$FILE > /tmp/clean.log") | crontab -
-    elif [ "$FILE" == "backup.sh" ]; then
-        (crontab -l 2>/dev/null; echo "30 23 * * 1,5 ${CURRENT_WORKDIR}/$FILE > /tmp/backup.log") | crontab -
-    elif [ "$FILE" == "docker-updater.sh" ]; then
-        (crontab -l 2>/dev/null; echo "0 3 */4 * * ${CURRENT_WORKDIR}/$FILE > /tmp/docker-updater.log") | crontab -
-    else
-        echo -e "${YELLOW}[w] [$FILE] Warning: Crontab specific schedule not setup.${NC}" >&2
-        (crontab -l 2>/dev/null; echo "0 0 * * * ${CURRENT_WORKDIR}/$FILE" > /tmp/$FILE.log) | crontab -
-    fi
-
-    echo -e "${GREEN}[✓] [$FILE] Crontab added, double-checking set up...${NC}"
-
-    if ! crontab -l | grep -q $FILE; then
-        echo -e "${RED}[x] [$FILE] Error: Crontab was not set up.${NC}" >&2
-        exit 1
-    fi
-
-    echo -e "${GREEN}[✓] [$FILE] Crontab confirmed.${NC}"
-done
-
-echo -e "${GREEN}[✓] Crontabs all set up.${NC}"
-
-echo -e "${GREEN}[✓] All done.${NC}"
+# Verify server connectivity for selected scripts
+verify_server_connectivity() {
+    local scripts=("$@")
+
+    log_info "Verifying server connectivity..."
+
+    for script in "${scripts[@]}"; do
+        local url="$FILES_URL/$script"
+        if ! curl -s --head "$url" | head -n 1 | grep -E "HTTP/[12] [23].." >/dev/null; then
+            die "Script '$script' not found on server: $url"
+        fi
+    done
+
+    log_success "Server connectivity verified"
+}
+
+# Download selected scripts
+download_scripts() {
+    local scripts=("$@")
+
+    log_info "Downloading ${#scripts[@]} script(s)..."
+
+    for script in "${scripts[@]}"; do
+        local url="$FILES_URL/$script"
+        log_step "Downloading $script..."
+
+        if ! curl -s -o "./$script" "$url"; then
+            die "Failed to download $script from $url"
+        fi
+
+        # Set executable permissions
+        chmod +x "./$script" || die "Failed to set executable permissions for $script"
+    done
+
+    log_success "All scripts downloaded and configured"
+}
+
+# Setup crontab entries for selected scripts
+setup_crontab() {
+    local scripts=("$@")
+    local current_workdir
+    current_workdir=$(pwd)
+
+    log_info "Setting up crontab entries..."
+
+    for script in "${scripts[@]}"; do
+        local schedule="${CRONTAB_SCHEDULES[$script]:-"0 0 * * *"}"
+        local log_file="/tmp/${script%.*}.log"
+        local cron_entry="$schedule $current_workdir/$script > $log_file 2>&1"
+
+        log_step "Configuring crontab for $script (Schedule: $schedule)..."
+
+        # Remove existing crontab entry for this script
+        if crontab -l 2>/dev/null | grep -q "$script"; then
+            log_step "Removing existing crontab entry for $script..."
+            crontab -l 2>/dev/null | grep -v "$script" | crontab - || die "Failed to remove existing crontab entry"
+        fi
+
+        # Add new crontab entry
+        (crontab -l 2>/dev/null; echo "$cron_entry") | crontab - || die "Failed to add crontab entry for $script"
+
+        # Verify the entry was added
+        if ! crontab -l 2>/dev/null | grep -q "$script"; then
+            die "Failed to verify crontab entry for $script"
+        fi
+
+        log_success "Crontab configured for $script"
+    done
+
+    log_success "All crontab entries configured successfully"
+}
+
+#==============================================================================
+# MAIN EXECUTION
+#==============================================================================
+
+main() {
+    log_step "Starting Server Scripts Downloader"
+    echo
+
+    # System detection and validation
+    log_info "Detecting operating system..."
+    local detected_os
+    detected_os=$(detect_operating_system)
+    detected_os=$(echo "$detected_os" | tr -d '\n\r' | xargs)  # Clean any whitespace/newlines
+    log_success "Detected $detected_os Linux"
+
+    # Package management
+    local missing_packages
+    readarray -t missing_packages < <(get_missing_packages)
+
+    # Filter out empty strings
+    local filtered_packages=()
+    for pkg in "${missing_packages[@]}"; do
+        if [[ -n "$pkg" ]]; then
+            filtered_packages+=("$pkg")
+        fi
+    done
+    missing_packages=("${filtered_packages[@]}")
+
+    # Debug output
+    log_info "Debug: Found ${#missing_packages[@]} missing packages"
+    for i in "${!missing_packages[@]}"; do
+        log_info "Debug: Missing package $((i+1)): '${missing_packages[i]}'"
+    done
+
+    if [[ ${#missing_packages[@]} -gt 0 ]]; then
+        log_warning "Missing packages detected: ${missing_packages[*]}"
+        install_packages "$detected_os" "${missing_packages[@]}"
+    else
+        log_success "All required packages are already installed"
+    fi
+
+    verify_packages
+    check_crontab_service
+
+    # Script selection and installation
+    local selected_scripts
+    readarray -t selected_scripts < <(select_scripts)
+
+    log_info "Selected scripts: ${selected_scripts[*]}"
+
+    verify_server_connectivity "${selected_scripts[@]}"
+    download_scripts "${selected_scripts[@]}"
+    setup_crontab "${selected_scripts[@]}"
+
+    echo
+    log_success "Installation completed successfully!"
+    log_info "Scripts have been downloaded to: $(pwd)"
+    log_info "Crontab entries have been configured. Use 'crontab -l' to view them."
+    log_info "Log files will be created in /tmp/ directory."
+}
+
+# Execute main function
+main "$@"
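main consumes those functions with `readarray -t arr < <(func)` rather than a pipeline; the process substitution keeps the assignment in the current shell, whereas `func | readarray ...` would populate the array in a subshell and lose it. Minimal illustration:

lines() { printf '%s\n' one two three; }
readarray -t arr < <(lines)
echo "${#arr[@]}"   # -> 3 (piping into readarray would leave arr empty here)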
@@ -4,18 +4,41 @@ $7zipPath = "$env:ProgramFiles\7-Zip\7z.exe"
 if (!(Test-Path "$env:ProgramFiles\7-Zip\7z.exe")) {
     Write-Host "7-Zip is not installed. Please install it to use this script."
+    Send-Notify "❌ 7-Zip is not installed. Backup aborted."
     exit 1
 }
 
 $BackupSource = @(
     "$env:USERPROFILE\Documents",
     "$env:USERPROFILE\Desktop",
-    "$env:USERPROFILE\Pictures"
+    "$env:USERPROFILE\Pictures",
+    "$env:USERPROFILE\.ssh",
+    "$env:USERPROFILE\.kube"
 )
 
 $NASDestination = "\\OMV\Backup\$env:COMPUTERNAME"
 $TempDir = "$env:TEMP\BackupTemp"
 $Date = Get-Date -Format "yyyy-MM-dd"
 
+$NotifyUrl = "http://notify.haven/notify"
+
+function Send-Notify {
+    param (
+        [string]$Message
+    )
+    if (-not $NotifyUrl) {
+        Write-Host "NOTIFY_URL environment variable is not set. Notification not sent."
+        return
+    }
+    $Title = "Backup - $env:COMPUTERNAME"
+    $Body = @{ title = $Title; message = $Message } | ConvertTo-Json
+    try {
+        Invoke-RestMethod -Uri $NotifyUrl -Method Post -ContentType 'application/json' -Body $Body | Out-Null
+        Write-Host "Notification sent: $Title - $Message"
+    } catch {
+        Write-Host "Failed to send notification: $_"
+    }
+}
+
 # Create temp directory
 New-Item -ItemType Directory -Path $TempDir -Force
 
@@ -30,18 +53,26 @@ foreach ($Folder in $BackupSource) {
         $ZipFile = "$TempDir\$FolderName-$Date.zip"
 
         Write-Host "Compressing $Folder..."
-        & "$7zipPath" a -tzip "$ZipFile" "$Folder\*" -mx=9
+        $compressResult = & "$7zipPath" a -tzip "$ZipFile" "$Folder\*" -mx=9
+        if ($LASTEXITCODE -ne 0) {
+            Write-Host "Compression failed for $Folder."
+            Send-Notify "❌ Compression failed for $Folder."
+            continue
+        }
 
         Write-Host "Copying $ZipFile to NAS..."
         Copy-Item $ZipFile $NASDestination -Force
 
         Write-Host "Removing $ZipFile..."
         Remove-Item $ZipFile
+    } else {
+        Write-Host "Source folder not found: $Folder"
+        Send-Notify "⚠️ Source folder not found: $Folder"
     }
 }
 
-Write-Host "Removing Files older than 15 days from $NASDestination..."
-$OldFiles = Get-ChildItem -Path $NASDestination -File | Where-Object { $_.LastWriteTime -lt (Get-Date).AddDays(-15) }
+Write-Host "Removing Files older than 7 days from $NASDestination..."
+$OldFiles = Get-ChildItem -Path $NASDestination -File | Where-Object { $_.LastWriteTime -lt (Get-Date).AddDays(-7) }
 foreach ($OldFile in $OldFiles) {
     Remove-Item $OldFile.FullName -Force
     Write-Host "Removed: $($OldFile.FullName)"
@@ -50,3 +81,4 @@ foreach ($OldFile in $OldFiles) {
 # Cleanup
 Remove-Item $TempDir -Recurse -Force
 Write-Host "Backup completed!"
+Send-Notify "✅ Backup completed successfully."