Compare commits


9 Commits

Author SHA1 Message Date
75fab63809 Merge branch 'main' of git.ivanch.me:ivanch/opencand.infra 2025-09-12 21:25:42 -03:00
e88a5a4cf4 improving db connection 2025-09-12 21:25:39 -03:00
3721e1fd30 gitignore 2025-06-20 22:34:22 +00:00
d9fa233c6c chmod x 2025-06-20 22:33:37 +00:00
4b2e2a764b db scripts 2025-06-19 20:44:11 -03:00
e62713b377 going back and using profiles 2025-06-18 20:14:04 -03:00
8aba3107cf removing useless depends on 2025-06-18 19:08:31 -03:00
7fe8d21d38 shm_size 4gb 2025-06-18 18:36:55 -03:00
ed950811b6 dividing dokcer compose into two 2025-06-18 18:35:58 -03:00
5 changed files with 187 additions and 254 deletions

.gitignore (vendored) · 3 changes · View File

@@ -1,3 +0,0 @@
**/etl-data
**/fotos_cand
*.zip

backup-db.sh (new executable file) · 56 changes · View File

@@ -0,0 +1,56 @@
#!/bin/bash
# OpenCand Database Backup Script
# This script creates a backup of the PostgreSQL database (excluding materialized views)
set -e # Exit on any error
# Configuration
DB_CONTAINER="opencand_db"
DB_NAME="opencand"
DB_USER="root"
BACKUP_FILE="backup_$(date +%Y%m%d_%H%M%S).sql"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
echo -e "${YELLOW}💾 OpenCand Database Backup Script${NC}"
echo "=================================="
# Check if Docker is running
if ! docker info >/dev/null 2>&1; then
echo -e "${RED}❌ Error: Docker is not running or not accessible${NC}"
exit 1
fi
# Check if the database container is running
if ! docker ps --format "table {{.Names}}" | grep -q "^${DB_CONTAINER}$"; then
echo -e "${RED}❌ Error: Database container '${DB_CONTAINER}' is not running${NC}"
echo "Please start the services with: docker-compose up -d"
exit 1
fi
echo -e "${YELLOW}📋 Creating backup (excluding materialized views)...${NC}"
# Get list of materialized views
MATVIEWS=$(docker exec $DB_CONTAINER psql -U $DB_USER -d $DB_NAME -Atc "SELECT schemaname || '.' || matviewname FROM pg_matviews;")
# Build exclude flags for pg_dump
EXCLUDE_FLAGS=""
for mv in $MATVIEWS; do
EXCLUDE_FLAGS+=" --exclude-table=$mv"
done
# Run pg_dump excluding materialized views
if docker exec $DB_CONTAINER pg_dump -U $DB_USER -d $DB_NAME $EXCLUDE_FLAGS > "$BACKUP_FILE"; then
echo -e "${GREEN}✅ Backup created: $BACKUP_FILE${NC}"
else
echo -e "${RED}❌ Error: Failed to create backup${NC}"
exit 1
fi
echo -e "${GREEN}🎉 Backup completed successfully!${NC}"
echo "=================================="

View File

@@ -11,6 +11,8 @@ DB_NAME="opencand"
DB_USER="root"
SQL_URL="https://git.ivanch.me/ivanch/opencand/raw/branch/main/db/db.sql"
SQL_FILE="./db.sql"
MV_URL="https://git.ivanch.me/ivanch/opencand/raw/branch/main/db/mv.sql"
MV_FILE="./mv.sql"
# Colors for output
RED='\033[0;31m'
@@ -34,20 +36,32 @@ if ! docker ps --format "table {{.Names}}" | grep -q "^${DB_CONTAINER}$"; then
exit 1
fi
# Download the SQL file
echo -e "${YELLOW}📥 Downloading SQL file...${NC}"
# Download the SQL files
echo -e "${YELLOW}📥 Downloading SQL files...${NC}"
if command -v curl &> /dev/null; then
if curl -L -o "$SQL_FILE" "$SQL_URL"; then
echo "✅ SQL file downloaded successfully"
echo "✅ db.sql downloaded successfully"
else
echo -e "${RED}❌ Error: Failed to download SQL file from ${SQL_URL}${NC}"
echo -e "${RED}❌ Error: Failed to download db.sql from ${SQL_URL}${NC}"
exit 1
fi
if curl -L -o "$MV_FILE" "$MV_URL"; then
echo "✅ mv.sql downloaded successfully"
else
echo -e "${RED}❌ Error: Failed to download mv.sql from ${MV_URL}${NC}"
exit 1
fi
elif command -v wget &> /dev/null; then
if wget -O "$SQL_FILE" "$SQL_URL"; then
echo "✅ SQL file downloaded successfully"
echo "✅ db.sql downloaded successfully"
else
echo -e "${RED}❌ Error: Failed to download SQL file from ${SQL_URL}${NC}"
echo -e "${RED}❌ Error: Failed to download db.sql from ${SQL_URL}${NC}"
exit 1
fi
if wget -O "$MV_FILE" "$MV_URL"; then
echo "✅ mv.sql downloaded successfully"
else
echo -e "${RED}❌ Error: Failed to download mv.sql from ${MV_URL}${NC}"
exit 1
fi
else
@@ -55,16 +69,21 @@ else
exit 1
fi
# Check if SQL file exists (after download)
# Check if SQL files exist (after download)
if [ ! -f "$SQL_FILE" ]; then
echo -e "${RED}❌ Error: SQL file '${SQL_FILE}' not found after download${NC}"
exit 1
fi
if [ ! -f "$MV_FILE" ]; then
echo -e "${RED}❌ Error: SQL file '${MV_FILE}' not found after download${NC}"
exit 1
fi
echo -e "${YELLOW}📋 Pre-deployment checks:${NC}"
echo "✅ Docker is running"
echo "✅ Database container is running"
echo "✅ SQL file downloaded"
echo "✅ db.sql downloaded"
echo "✅ mv.sql downloaded"
echo ""
# Wait for database to be ready
@@ -82,36 +101,48 @@ done
echo "✅ Database is ready"
echo ""
# Create backup before deployment
echo -e "${YELLOW}💾 Creating database backup...${NC}"
BACKUP_FILE="backup_$(date +%Y%m%d_%H%M%S).sql"
docker exec $DB_CONTAINER pg_dump -U $DB_USER -d $DB_NAME > $BACKUP_FILE
echo "✅ Backup created: $BACKUP_FILE"
echo ""
# Deploy the SQL file
echo -e "${YELLOW}🔧 Deploying database changes...${NC}"
# Deploy the db.sql file
echo -e "${YELLOW}🔧 Deploying database changes (db.sql)...${NC}"
echo "Executing: $SQL_FILE"
if docker exec -i $DB_CONTAINER psql -U $DB_USER -d $DB_NAME < $SQL_FILE; then
echo -e "${GREEN}Database deployment completed successfully!${NC}"
echo ""
# Show table information
echo -e "${YELLOW}📊 Current database tables:${NC}"
docker exec $DB_CONTAINER psql -U $DB_USER -d $DB_NAME -c "\dt"
echo -e "${GREEN}db.sql deployment completed successfully!${NC}"
else
echo -e "${RED}❌ Error: Database deployment failed${NC}"
echo -e "${RED}❌ Error: db.sql deployment failed${NC}"
echo -e "${YELLOW}💡 You can restore from backup using:${NC}"
echo "docker exec -i $DB_CONTAINER psql -U $DB_USER -d $DB_NAME < $BACKUP_FILE"
# Clean up before exit
rm -f "$SQL_FILE" "$MV_FILE"
exit 1
fi
# Clean up downloaded SQL file
# Deploy the mv.sql file
echo -e "${YELLOW}<EFBFBD> Deploying database changes (mv.sql)...${NC}"
echo "Executing: $MV_FILE"
if docker exec -i $DB_CONTAINER psql -U $DB_USER -d $DB_NAME < $MV_FILE; then
echo -e "${GREEN}✅ mv.sql deployment completed successfully!${NC}"
else
echo -e "${RED}❌ Error: mv.sql deployment failed${NC}"
echo -e "${YELLOW}💡 You can restore from backup using:${NC}"
echo "docker exec -i $DB_CONTAINER psql -U $DB_USER -d $DB_NAME < $BACKUP_FILE"
# Clean up before exit
rm -f "$SQL_FILE" "$MV_FILE"
exit 1
fi
# Show table and materialized view information
echo -e "${YELLOW}📊 Current database tables:${NC}"
docker exec $DB_CONTAINER psql -U $DB_USER -d $DB_NAME -c "\dt"
echo -e "${YELLOW}📄 Current materialized views:${NC}"
docker exec $DB_CONTAINER psql -U $DB_USER -d $DB_NAME -c "\dm"
# Clean up downloaded SQL files
echo -e "${YELLOW}🧹 Cleaning up...${NC}"
rm -f "$SQL_FILE"
echo "✅ Temporary SQL file removed"
rm -f "$SQL_FILE" "$MV_FILE"
echo "✅ Temporary SQL files removed"
echo ""
echo -e "${GREEN}🎉 Deployment completed successfully!${NC}"

View File

@@ -1,11 +1,77 @@
services: # ───────────────────────────────────────────────────────────────────────────
# 1. PostgreSQL Database
services:
# ───────────────────────────────────────────────────────────────────────────
# 1. .NET API
# ───────────────────────────────────────────────────────────────────────────
api:
container_name: opencand_api
restart: unless-stopped
image: git.ivanch.me/ivanch/opencand.api:latest
ports:
- "5100:8080"
environment:
ASPNETCORE_ENVIRONMENT: "Production"
Logging__LogLevel__Default: "Information"
DatabaseSettings__ConnectionString: "Host=db;Port=5432;Database=opencand;Username=root;Password=root;Pooling=true;Minimum Pool Size=1;Maximum Pool Size=20;Connection Lifetime=300;Command Timeout=30;Application Name=OpenCand.API;Include Error Detail=true"
FotosSettings__ApiBasePath: "https://api.opencand.ivanch.me/assets/fotos"
volumes:
- ./fotos_cand:/app/fotos_cand
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
labels: "service=api"
labels:
- "promtail.enable=true"
- "promtail.job=opencand-api"
# ───────────────────────────────────────────────────────────────────────────
# 2. React + Vite Frontend
# ───────────────────────────────────────────────────────────────────────────
frontend:
container_name: opencand_frontend
restart: unless-stopped
image: git.ivanch.me/ivanch/opencand.ui:latest
ports:
- "5110:80"
depends_on:
- api
# ───────────────────────────────────────────────────────────────────────────
# 3. ETL (Optional: runs once at startup)
#
# If you want the ETL to run on every compose up, give it restart: "no" or
# some other policy. It will run, then exit.
#
# If you instead prefer to run ETL manually or via host cron, you can omit
# this service and just `docker run myorg/etl:latest ...` on demand.
# ───────────────────────────────────────────────────────────────────────────
etl:
image: git.ivanch.me/ivanch/opencand.etl:latest
container_name: opencand_etl
restart: "no"
environment:
DatabaseSettings__ConnectionString: "Host=db;Port=5432;Database=opencand;Username=root;Password=root"
ParserSettings_CandidatoCSVThreads: "40"
BasePath: "etl-data"
volumes:
- ./etl-data:/app/etl-data
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
labels: "service=etl"
labels:
- "promtail.enable=true"
- "promtail.job=opencand-etl"
db:
image: postgres:14-alpine
container_name: opencand_db
profiles: ["infra"]
restart: unless-stopped
hostname: db
shm_size: 4g
environment:
POSTGRES_USER: root
POSTGRES_PASSWORD: root
@@ -27,85 +93,13 @@ services: # ──────────────────────
- "promtail.enable=true"
- "promtail.job=opencand-db"
# ───────────────────────────────────────────────────────────────────────────
# 2. .NET API
# ───────────────────────────────────────────────────────────────────────────
api:
container_name: opencand_api
restart: unless-stopped
image: git.ivanch.me/ivanch/opencand.api:latest
ports:
- "5100:8080"
depends_on:
db:
condition: service_healthy
environment:
ASPNETCORE_ENVIRONMENT: "Production"
DatabaseSettings__ConnectionString: "Host=db;Port=5432;Database=opencand;Username=root;Password=root"
FotosSettings__ApiBasePath: "https://api.opencand.ivanch.me/assets/fotos"
volumes:
- ./fotos_cand:/app/fotos_cand
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
labels: "service=api"
labels:
- "promtail.enable=true"
- "promtail.job=opencand-api"
# ───────────────────────────────────────────────────────────────────────────
# 3. React + Vite Frontend
# ───────────────────────────────────────────────────────────────────────────
frontend:
container_name: opencand_frontend
restart: unless-stopped
image: git.ivanch.me/ivanch/opencand.ui:latest
ports:
- "5110:80"
depends_on:
- api
# ───────────────────────────────────────────────────────────────────────────
# 4. ETL (Optional: runs once at startup)
#
# If you want the ETL to run on every compose up, give it restart: "no" or
# some other policy. It will run, then exit.
#
# If you instead prefer to run ETL manually or via host cron, you can omit
# this service and just `docker run myorg/etl:latest ...` on demand.
# ───────────────────────────────────────────────────────────────────────────
etl:
image: git.ivanch.me/ivanch/opencand.etl:latest
container_name: opencand_etl
restart: "no"
depends_on:
db:
condition: service_healthy
loki:
condition: service_started
environment:
DatabaseSettings__ConnectionString: "Host=db;Port=5432;Database=opencand;Username=root;Password=root"
ParserSettings_CandidatoCSVThreads: "40"
BasePath: "etl-data"
volumes:
- ./etl-data:/app/etl-data
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
labels: "service=etl"
labels:
- "promtail.enable=true"
- "promtail.job=opencand-etl"
# ───────────────────────────────────────────────────────────────────────────
# 5. Log Monitoring Stack
# ───────────────────────────────────────────────────────────────────────────
# Loki - Log aggregation system
loki:
image: grafana/loki:2.9.0
container_name: opencand_loki
profiles: ["infra"]
restart: unless-stopped
ports:
- "127.0.0.1:6110:3100"
@@ -123,6 +117,7 @@ services: # ──────────────────────
promtail:
image: grafana/promtail:2.9.0
container_name: opencand_promtail
profiles: ["infra"]
restart: unless-stopped
volumes:
- ./monitoring/promtail-config.yaml:/etc/promtail/config.yml
@@ -137,6 +132,7 @@ services: # ──────────────────────
prometheus:
image: prom/prometheus:v2.45.0
container_name: opencand_prometheus
profiles: ["infra"]
restart: unless-stopped
ports:
- "127.0.0.1:6090:9090"
@@ -155,6 +151,7 @@ services: # ──────────────────────
node-exporter:
image: prom/node-exporter:v1.6.0
container_name: opencand_node_exporter
profiles: ["infra"]
restart: unless-stopped
ports:
- "127.0.0.1:6100:9100"
@@ -171,6 +168,7 @@ services: # ──────────────────────
grafana:
image: grafana/grafana:10.0.0
container_name: opencand_grafana
profiles: ["infra"]
restart: unless-stopped
ports:
- "6000:3000"

View File

@@ -1,149 +0,0 @@
# OpenCand Monitoring Setup
This monitoring stack provides comprehensive log aggregation and visualization for the OpenCand project, with special focus on the ETL service.
## Services Overview
### 🔍 **Grafana Loki** (Port 3100)
- **Purpose**: Log aggregation and storage
- **Access**: http://localhost:3100
- **Description**: Collects and stores all container logs in a structured format
### 📊 **Grafana** (Port 3000)
- **Purpose**: Log visualization and dashboards
- **Access**: http://localhost:3000
- **Credentials**:
- Username: `admin`
- Password: `admin`
- **Pre-configured Dashboards**: OpenCand ETL Monitoring dashboard
### 📈 **Prometheus** (Port 9090)
- **Purpose**: Metrics collection and storage
- **Access**: http://localhost:9090
- **Description**: Collects system and application metrics
### 🖥️ **Node Exporter** (Port 9100)
- **Purpose**: System metrics collection
- **Access**: http://localhost:9100/metrics
- **Description**: Provides host system metrics (CPU, memory, disk, etc.)
### 🚚 **Promtail**
- **Purpose**: Log collection agent
- **Description**: Automatically discovers and ships Docker container logs to Loki
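The repository's own config lives at `./monitoring/promtail-config.yaml` and is not shown in this diff; as a rough sketch of what Docker log discovery in Promtail 2.9 looks like (assuming the Docker socket is mounted into the container), it would be along these lines:

```yaml
# Rough sketch only; the actual promtail-config.yaml may differ.
server:
  http_listen_port: 9080
positions:
  filename: /tmp/positions.yaml
clients:
  - url: http://loki:3100/loki/api/v1/push   # Loki service on the compose network
scrape_configs:
  - job_name: docker
    docker_sd_configs:
      - host: unix:///var/run/docker.sock
        refresh_interval: 5s
    relabel_configs:
      # Docker reports names with a leading slash; strip it for a clean label
      - source_labels: ['__meta_docker_container_name']
        regex: '/(.*)'
        target_label: 'container_name'
```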
## Key Features
### ETL-Specific Monitoring
- ✅ Real-time ETL process logs
- ✅ Error tracking and alerting capabilities
- ✅ Performance metrics monitoring
- ✅ Data processing progress tracking
### Container Log Management
- ✅ Automatic log rotation (10MB max size, 3 files)
- ✅ Structured log labeling
- ✅ Multi-service log aggregation
### Pre-built Dashboards
- ✅ OpenCand ETL Logs viewer
- ✅ API logs monitoring
- ✅ Database logs tracking
- ✅ Container resource usage
## Getting Started
1. **Start the monitoring stack**:
```bash
docker-compose up -d
```
2. **Access Grafana**:
- Open http://localhost:3000
- Login with admin/admin
- Navigate to "Dashboards" → "OpenCand ETL Monitoring"
3. **View ETL Logs in Real-time**:
- In Grafana, go to "Explore"
- Select "Loki" as datasource
- Use query: `{container_name="opencand_etl"}`
4. **Monitor System Metrics**:
- Access Prometheus at http://localhost:9090
- View system metrics from Node Exporter
## Log Queries Examples
### ETL Service Logs
```logql
{container_name="opencand_etl"}
```
### Error Logs Only
```logql
{container_name="opencand_etl"} |= "ERROR"
```
### API Logs with Filtering
```logql
{container_name="opencand_api"} |= "Microsoft.AspNetCore"
```
### Database Connection Logs
```logql
{container_name="opencand_db"} |= "connection"
```
## Configuration Files
- **Loki**: `./monitoring/loki-config.yaml`
- **Promtail**: `./monitoring/promtail-config.yaml`
- **Prometheus**: `./monitoring/prometheus.yml`
- **Grafana Datasources**: `./monitoring/grafana/provisioning/datasources/`
- **Grafana Dashboards**: `./monitoring/grafana/provisioning/dashboards/`
## Data Persistence
The following volumes are created for data persistence:
- `loki-data`: Loki log storage
- `prometheus-data`: Prometheus metrics storage
- `grafana-data`: Grafana dashboards and settings
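These correspond to top-level named volumes in the compose file (that part of docker-compose.yml is not shown in the diff above); declared, they look roughly like:

```yaml
# Sketch of the named-volume declarations; mount paths inside each service omitted
volumes:
  loki-data:
  prometheus-data:
  grafana-data:
```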
## Troubleshooting
### ETL Logs Not Appearing
1. Check if ETL container is running: `docker ps`
2. Verify Promtail is collecting logs: `docker logs opencand_promtail`
3. Check Loki status: `curl http://localhost:3100/ready`
### Grafana Dashboard Issues
1. Verify datasources are configured correctly
2. Check if Loki is accessible from Grafana container
3. Restart Grafana container: `docker-compose restart grafana`
### Performance Issues
1. Monitor disk usage for log storage
2. Adjust log retention in `loki-config.yaml`
3. Increase resource limits if needed
## Customization
### Adding More Dashboards
1. Create JSON dashboard files in `./monitoring/grafana/provisioning/dashboards/`
2. Restart Grafana container
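For Grafana to load those JSON files, a dashboard provider file must also sit in the provisioning directory. A minimal sketch of such a provider (the repository's actual provider file and container-side path may differ):

```yaml
# Sketch of a dashboard provisioning provider, e.g. dashboards.yaml next to the JSON files
apiVersion: 1
providers:
  - name: opencand
    type: file
    disableDeletion: false
    options:
      # Path inside the Grafana container where the JSON dashboards are mounted
      path: /etc/grafana/provisioning/dashboards
```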
### Log Retention Configuration
Edit `./monitoring/loki-config.yaml` to adjust retention policies:
```yaml
limits_config:
retention_period: 168h # 7 days
```
### Alert Configuration
Add alerting rules to Prometheus configuration for ETL failure notifications.
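A rule file referenced from `prometheus.yml` via `rule_files` could look roughly like the sketch below. The example expression uses standard node-exporter metrics (disk pressure from log growth); alerting on the ETL container itself would additionally need a per-container metrics source such as cAdvisor, which this stack does not include:

```yaml
# Sketch of a Prometheus alerting rule file (e.g. ./monitoring/alert-rules.yml)
groups:
  - name: opencand-alerts
    rules:
      - alert: LowDiskSpace
        expr: node_filesystem_avail_bytes{mountpoint="/"} / node_filesystem_size_bytes{mountpoint="/"} < 0.10
        for: 10m
        labels:
          severity: warning
        annotations:
          summary: "Less than 10% disk space left on {{ $labels.instance }}"
```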
## Security Notes
- Change default Grafana admin password in production
- Restrict network access to monitoring ports
- Consider using authentication for external access
- Regularly update monitoring stack images